Merge tag 'drm-intel-next-2017-03-06' of git://anongit.freedesktop.org/git/drm-intel into drm-next

4 weeks' worth of stuff since I was traveling & lazy:

- lspcon improvements (Imre)
- proper atomic state for cdclk handling (Ville)
- gpu reset improvements (Chris)
- lots and lots of polish around fences, requests, waiting and
  everything related all over (both gem and modeset code), from Chris
- atomic by default on gen5+ minus byt/bsw (Maarten did the patch to
  flip the default, really this is a massive joint team effort)
- moar power domains, now 64bit (Ander)
- big pile of in-kernel unit tests for various gem subsystems (Chris),
  including simple mock objects for the i915 device and the ggtt
  manager.
- i915_gpu_info in debugfs, for taking a snapshot of the current gpu
  state. Same thing as i915_error_state, but useful if the kernel didn't
  notice something is stuck (see the sketch after this list). From Chris.
- bxt dsi fixes (Umar Shankar)
- bxt w/a updates (Jani)
- no more struct_mutex for gem object unreference (Chris)
- some execlist refactoring (Tvrtko)
- color manager support for glk (Ander)
- improve the power-well sync code to better take over from the
  firmware (Imre)
- gem tracepoint polish (Tvrtko)
- lots of glk fixes all around (Ander)
- ctx switch improvements (Chris)
- glk dsi support & fixes (Deepak M)
- dsi fixes for vlv and cleanups, lots of them (Hans de Goede)
- switch to i915.ko types in lots of our internal modeset code (Ander)
- byt/bsw atomic wm update code, yay (Ville)
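
As a quick illustration of the new debugfs interface mentioned above, here
is a minimal userspace sketch (not part of this merge) that dumps the
i915_gpu_info snapshot. The path is an assumption -- debugfs mounted at
/sys/kernel/debug, i915 on DRM minor 0 -- so adjust it for your setup;
reading the file typically requires root.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Assumed path: debugfs at /sys/kernel/debug, i915 device on
           * DRM minor 0. i915_gpu_info is the file this series adds. */
          int fd = open("/sys/kernel/debug/dri/0/i915_gpu_info", O_RDONLY);
          char buf[4096];
          ssize_t n;

          if (fd < 0) {
                  perror("open i915_gpu_info");
                  return 1;
          }

          /* Stream the snapshot of the current GPU state to stdout. */
          while ((n = read(fd, buf, sizeof(buf))) > 0)
                  fwrite(buf, 1, n, stdout);

          close(fd);
          return 0;
  }

Unlike i915_error_state, which only has content once a hang has been
detected, i915_gpu_info captures the state at the moment it is read.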

* tag 'drm-intel-next-2017-03-06' of git://anongit.freedesktop.org/git/drm-intel: (432 commits)
  drm/i915: Update DRIVER_DATE to 20170306
  drm/i915: Don't use enums for hardware engine id
  drm/i915: Split breadcrumbs spinlock into two
  drm/i915: Refactor wakeup of the next breadcrumb waiter
  drm/i915: Take reference for signaling the request from hardirq
  drm/i915: Add FIFO underrun tracepoints
  drm/i915: Add cxsr toggle tracepoint
  drm/i915: Add VLV/CHV watermark/FIFO programming tracepoints
  drm/i915: Add plane update/disable tracepoints
  drm/i915: Kill level 0 wm hack for VLV/CHV
  drm/i915: Workaround VLV/CHV sprite1->sprite0 enable underrun
  drm/i915: Sanitize VLV/CHV watermarks properly
  drm/i915: Only use update_wm_{pre,post} for pre-ilk platforms
  drm/i915: Nuke crtc->wm.cxsr_allowed
  drm/i915: Compute proper intermediate wms for vlv/cvh
  drm/i915: Skip useless watermark/FIFO related work on VLV/CHV when not needed
  drm/i915: Compute vlv/chv wms the atomic way
  drm/i915: Compute VLV/CHV FIFO sizes based on the PM2 watermarks
  drm/i915: Plop vlv/chv fifo sizes into crtc state
  drm/i915: Plop vlv wm state into crtc_state
  ...
Dave Airlie 2017-03-08 12:41:47 +10:00
Parents: b558dfd56a 505b681539
Commit: 2e16101780
133 changed files: 18861 additions and 8253 deletions


@@ -222,6 +222,15 @@ Video BIOS Table (VBT)
.. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
:internal:
Display clocks
--------------
.. kernel-doc:: drivers/gpu/drm/i915/intel_cdclk.c
:doc: CDCLK / RAWCLK
.. kernel-doc:: drivers/gpu/drm/i915/intel_cdclk.c
:internal:
Display PLLs
------------


@@ -526,6 +526,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_SKL_IDS(&gen9_early_ops),
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
};
static void __init


@@ -332,14 +332,6 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
{32, 8192, 3},
{64, 16384, 4},
{128, 32768, 5},
{256, 65536, 6},
{512, 131072, 7},
};
static unsigned int intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -670,6 +662,14 @@ static int intel_gtt_init(void)
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
{32, 8192, 3},
{64, 16384, 4},
{128, 32768, 5},
{256, 65536, 6},
{512, 131072, 7},
};
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);


@@ -19,6 +19,7 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,


@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_MM_SELFTEST
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -58,3 +60,30 @@ config DRM_I915_SW_FENCE_DEBUG_OBJECTS
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
default n
select FAULT_INJECTION
select PRIME_NUMBERS
help
Choose this option to allow the driver to perform selftests upon
loading; also requires the i915.selftest=1 module parameter. To
exit the module after running the selftests (i.e. to prevent normal
module initialisation afterwards) use i915.selftest=-1.
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
bool "Enable low level request tracing events"
depends on DRM_I915
default n
help
Choose this option to turn on low level request tracing events.
This provides the ability to precisely monitor engine utilisation
and also analyze the request dependency resolving timeline.
If in doubt, say "N".


@@ -29,6 +29,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_clflush.o \
i915_gem_context.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
@@ -72,6 +73,7 @@ i915-y += intel_audio.o \
intel_atomic.o \
intel_atomic_plane.o \
intel_bios.o \
intel_cdclk.o \
intel_color.o \
intel_display.o \
intel_dpio_phy.o \
@@ -116,6 +118,9 @@ i915-y += dvo_ch7017.o \
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o
# virtual gpu code
i915-y += i915_vgpu.o


@@ -1530,7 +1530,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
len += copy_len;
gma += copy_len;
}
return 0;
return len;
}
@@ -1644,7 +1644,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
if (ret) {
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -2608,11 +2608,8 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
unsigned int copy_len = 0;
u32 *cs;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2626,36 +2623,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
ret = intel_ring_begin(workload->req, workload->rb_len / 4);
if (ret)
return ret;
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs))
return PTR_ERR(cs);
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
workload->shadow_ring_buffer_va = cs;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
gma_head, gma_top, cs);
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
cs += ret / sizeof(u32);
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
intel_ring_advance(ring);
cs += ret / sizeof(u32);
intel_ring_advance(workload->req, cs);
return 0;
}
@@ -2709,7 +2703,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
if (ret < 0) {
gvt_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}


@@ -35,6 +35,23 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
return to_i915(node->minor->dev);
}
static __always_inline void seq_print_param(struct seq_file *m,
const char *name,
const char *type,
const void *x)
{
if (!__builtin_strcmp(type, "bool"))
seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
else if (!__builtin_strcmp(type, "int"))
seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
else if (!__builtin_strcmp(type, "char *"))
seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
else
BUILD_BUG();
}
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -43,10 +60,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
kernel_param_unlock(THIS_MODULE);
return 0;
}
@@ -428,7 +452,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
ggtt->base.total, ggtt->mappable_end);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -456,7 +480,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
request = list_first_entry_or_null(&file_priv->mm.request_list,
struct drm_i915_gem_request,
client_list);
client_link);
rcu_read_lock();
task = pid_task(request && request->ctx->pid ?
request->ctx->pid : file->pid,
@@ -676,14 +700,14 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
spin_lock_irq(&b->lock);
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -827,10 +851,22 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe)
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv,
power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
}
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
intel_display_power_put(dev_priv, power_domain);
}
seq_printf(m, "Master IER:\t%08x\n",
I915_READ(VLV_MASTER_IER));
@@ -928,6 +964,61 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
struct i915_gpu_state *error = file->private_data;
struct drm_i915_error_state_buf str;
ssize_t ret;
loff_t tmp;
if (!error)
return 0;
ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
if (ret)
return ret;
ret = i915_error_state_to_str(&str, error);
if (ret)
goto out;
tmp = 0;
ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
if (ret < 0)
goto out;
*pos = str.start + ret;
out:
i915_error_state_buf_release(&str);
return ret;
}
static int gpu_state_release(struct inode *inode, struct file *file)
{
i915_gpu_state_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct i915_gpu_state *gpu;
gpu = i915_capture_gpu_state(inode->i_private);
if (!gpu)
return -ENOMEM;
file->private_data = gpu;
return 0;
}
static const struct file_operations i915_gpu_info_fops = {
.owner = THIS_MODULE,
.open = i915_gpu_info_open,
.read = gpu_state_read,
.llseek = default_llseek,
.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
@@ -935,93 +1026,33 @@ i915_error_state_write(struct file *filp,
size_t cnt,
loff_t *ppos)
{
struct i915_error_state_file_priv *error_priv = filp->private_data;
struct i915_gpu_state *error = filp->private_data;
if (!error)
return 0;
DRM_DEBUG_DRIVER("Resetting error state\n");
i915_destroy_error_state(error_priv->i915);
i915_reset_error_state(error->i915);
return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
struct i915_error_state_file_priv *error_priv;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
return -ENOMEM;
error_priv->i915 = dev_priv;
i915_error_state_get(&dev_priv->drm, error_priv);
file->private_data = error_priv;
file->private_data = i915_first_error_state(inode->i_private);
return 0;
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
struct i915_error_state_file_priv *error_priv = file->private_data;
i915_error_state_put(error_priv);
kfree(error_priv);
return 0;
}
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
size_t count, loff_t *pos)
{
struct i915_error_state_file_priv *error_priv = file->private_data;
struct drm_i915_error_state_buf error_str;
loff_t tmp_pos = 0;
ssize_t ret_count = 0;
int ret;
ret = i915_error_state_buf_init(&error_str, error_priv->i915,
count, *pos);
if (ret)
return ret;
ret = i915_error_state_to_str(&error_str, error_priv);
if (ret)
goto out;
ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
error_str.buf,
error_str.bytes);
if (ret_count < 0)
ret = ret_count;
else
*pos = error_str.start + ret_count;
out:
i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
.owner = THIS_MODULE,
.open = i915_error_state_open,
.read = i915_error_state_read,
.read = gpu_state_read,
.write = i915_error_state_write,
.llseek = default_llseek,
.release = i915_error_state_release,
.release = gpu_state_release,
};
#endif
static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
*val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
return 0;
}
static int
i915_next_seqno_set(void *data, u64 val)
{
@@ -1040,13 +1071,12 @@ i915_next_seqno_set(void *data, u64 val)
}
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
NULL, i915_next_seqno_set,
"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1109,10 +1139,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
goto out;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
@@ -1147,7 +1173,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv, cagf);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
pm_ier = I915_READ(GEN6_PMIER);
@@ -1198,21 +1223,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1236,11 +1258,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_puts(m, "no P-state info available\n");
}
seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
out:
intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -1307,35 +1328,40 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_put(dev_priv);
if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
seq_printf(m, "Hangcheck active, fires in %dms\n",
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
jiffies));
} else
seq_printf(m, "Hangcheck inactive\n");
else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
seq_puts(m, "Hangcheck active, work pending\n");
else
seq_puts(m, "Hangcheck inactive\n");
seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *rb;
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
intel_engine_last_submit(engine),
engine->timeline->inflight_seqnos);
seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
yesno(engine->hangcheck.stalled));
spin_lock_irq(&b->lock);
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
@@ -1788,7 +1814,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1808,8 +1834,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
(IS_GEN9_BC(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -2302,10 +2328,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(dev_priv->rps.power));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
100 * rpup / rpupei,
rpup && rpupei ? 100 * rpup / rpupei : 0,
dev_priv->rps.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
100 * rpdown / rpdownei,
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
dev_priv->rps.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
@@ -2351,7 +2377,9 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
huc_fw->rsa_offset, huc_fw->rsa_size);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -2383,6 +2411,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
guc_fw->rsa_offset, guc_fw->rsa_size);
intel_runtime_pm_get(dev_priv);
tmp = I915_READ(GUC_STATUS);
seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
@@ -2396,6 +2426,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
for (i = 0; i < 16; i++)
seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -2777,15 +2809,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
seq_printf(m, "%-25s %d\n", power_well->name,
power_well->count);
for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
power_domain++) {
if (!(BIT(power_domain) & power_well->domains))
continue;
for_each_power_domain(power_domain, power_well->domains)
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
}
mutex_unlock(&power_domains->lock);
@@ -3205,6 +3232,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "GT awake? %s\n",
yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
@@ -3212,11 +3244,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
u64 addr;
seq_printf(m, "%s\n", engine->name);
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
rcu_read_lock();
@@ -3294,15 +3327,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
rcu_read_lock();
rq = READ_ONCE(engine->execlist_port[0].request);
if (rq)
print_request(m, rq, "\t\tELSP[0] ");
else
if (rq) {
seq_printf(m, "\t\tELSP[0] count=%d, ",
engine->execlist_port[0].count);
print_request(m, rq, "rq: ");
} else {
seq_printf(m, "\t\tELSP[0] idle\n");
}
rq = READ_ONCE(engine->execlist_port[1].request);
if (rq)
print_request(m, rq, "\t\tELSP[1] ");
else
if (rq) {
seq_printf(m, "\t\tELSP[1] count=%d, ",
engine->execlist_port[1].count);
print_request(m, rq, "rq: ");
} else {
seq_printf(m, "\t\tELSP[1] idle\n");
}
rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
@@ -3320,14 +3359,14 @@ static int i915_engine_info(struct seq_file *m, void *unused)
I915_READ(RING_PP_DIR_DCLV(engine)));
}
spin_lock_irq(&b->lock);
spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
seq_puts(m, "\n");
}
@@ -3746,7 +3785,19 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
intel_dp->compliance.test_data.edid);
else if (intel_dp->compliance.test_type ==
DP_TEST_LINK_VIDEO_PATTERN) {
seq_printf(m, "hdisplay: %d\n",
intel_dp->compliance.test_data.hdisplay);
seq_printf(m, "vdisplay: %d\n",
intel_dp->compliance.test_data.vdisplay);
seq_printf(m, "bpc: %u\n",
intel_dp->compliance.test_data.bpc);
}
} else
seq_puts(m, "0");
}
@@ -4237,7 +4288,8 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
intel_set_rps(dev_priv, val);
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4292,7 +4344,8 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
intel_set_rps(dev_priv, val);
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4418,7 +4471,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@@ -4567,6 +4620,81 @@ static const struct file_operations i915_forcewake_fops = {
.release = i915_forcewake_release,
};
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
return 0;
}
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
const char __user *ubuf, size_t len,
loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
unsigned int new_threshold;
int i;
char *newline;
char tmp[16];
if (len >= sizeof(tmp))
return -EINVAL;
if (copy_from_user(tmp, ubuf, len))
return -EFAULT;
tmp[len] = '\0';
/* Strip newline, if any */
newline = strchr(tmp, '\n');
if (newline)
*newline = '\0';
if (strcmp(tmp, "reset") == 0)
new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
else if (kstrtouint(tmp, 10, &new_threshold) != 0)
return -EINVAL;
if (new_threshold > 0)
DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
new_threshold);
else
DRM_DEBUG_KMS("Disabling HPD storm detection\n");
spin_lock_irq(&dev_priv->irq_lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
flush_delayed_work(&dev_priv->hotplug.reenable_work);
return len;
}
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
static const struct file_operations i915_hpd_storm_ctl_fops = {
.owner = THIS_MODULE,
.open = i915_hpd_storm_ctl_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = i915_hpd_storm_ctl_write
};
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4633,6 +4761,7 @@ static const struct i915_debugfs_files {
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
@@ -4643,7 +4772,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops}
{"i915_guc_log_control", &i915_guc_log_control_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)


@@ -43,6 +43,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -248,6 +249,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
@@ -274,9 +276,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD2:
value = !!dev_priv->engine[VCS2];
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_GEN(dev_priv) >= 4;
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
@@ -318,10 +317,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
/* The register is already force-woken. We dont need
* any rpm here
*/
intel_runtime_pm_get(dev_priv);
value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
intel_runtime_pm_put(dev_priv);
break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
@@ -350,6 +348,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
case I915_PARAM_HAS_EXEC_SOFTPIN:
case I915_PARAM_HAS_EXEC_ASYNC:
case I915_PARAM_HAS_EXEC_FENCE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -756,6 +756,15 @@ out_err:
return -ENOMEM;
}
static void i915_engines_cleanup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id)
kfree(engine);
}
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -769,10 +778,17 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
if (IS_HSW_EARLY_SDV(dev_priv) ||
IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
bool pre = false;
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
"It may not be fully functional.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
}
}
/**
@@ -808,6 +824,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
spin_lock_init(&dev_priv->wm.dsparb_lock);
@@ -818,12 +835,15 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->pps_mutex);
intel_uc_init_early(dev_priv);
i915_memcpy_init_early(dev_priv);
ret = intel_engines_init_early(dev_priv);
if (ret)
return ret;
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
return ret;
goto err_engines;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -852,6 +872,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
err_workqueues:
i915_workqueues_cleanup(dev_priv);
err_engines:
i915_engines_cleanup(dev_priv);
return ret;
}
@@ -864,6 +886,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_engines_cleanup(dev_priv);
}
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
@@ -930,6 +953,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
goto put_bridge;
intel_uncore_init(dev_priv);
i915_gem_init_mmio(dev_priv);
return 0;
@@ -967,7 +991,7 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}
/**
@@ -1185,11 +1209,15 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct drm_i915_private *dev_priv;
int ret;
if (i915.nuclear_pageflip)
driver.driver_features |= DRIVER_ATOMIC;
/* Enable nuclear pageflip on ILK+, except vlv/chv */
if (!i915.nuclear_pageflip &&
(match_info->gen < 5 || match_info->has_gmch_display))
driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
@@ -1197,8 +1225,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
kfree(dev_priv);
return ret;
goto out_free;
}
dev_priv->drm.pdev = pdev;
@@ -1206,7 +1233,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
goto out_free_priv;
goto out_fini;
pci_set_drvdata(pdev, &dev_priv->drm);
@@ -1270,9 +1297,11 @@ out_runtime_pm_put:
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
out_free_priv:
out_fini:
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
drm_dev_unref(&dev_priv->drm);
drm_dev_fini(&dev_priv->drm);
out_free:
kfree(dev_priv);
return ret;
}
@@ -1280,6 +1309,8 @@ void i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_modeset_acquire_ctx ctx;
int ret;
intel_fbdev_fini(dev);
@@ -1288,6 +1319,24 @@ void i915_driver_unload(struct drm_device *dev)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
drm_modeset_acquire_init(&ctx, 0);
while (1) {
ret = drm_modeset_lock_all_ctx(dev, &ctx);
if (!ret)
ret = drm_atomic_helper_disable_all(dev, &ctx);
if (ret != -EDEADLK)
break;
drm_modeset_backoff(&ctx);
}
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
intel_gvt_cleanup(dev_priv);
i915_driver_unregister(dev_priv);
@@ -1317,7 +1366,7 @@ void i915_driver_unload(struct drm_device *dev)
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev_priv);
i915_reset_error_state(dev_priv);
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
@@ -1333,8 +1382,16 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_cleanup_mmio(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_driver_cleanup_early(dev_priv);
drm_dev_fini(&dev_priv->drm);
kfree(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1716,6 +1773,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
i915_gem_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
out:
@@ -1787,7 +1846,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
i915_gem_reset_finish(dev_priv);
i915_gem_reset(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1813,6 +1872,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
i915_queue_hangcheck(dev_priv);
wakeup:
i915_gem_reset_finish(dev_priv);
enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
@@ -2532,7 +2592,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2574,7 +2634,8 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER | DRIVER_MODESET,
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
.release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
@@ -2603,3 +2664,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif


@@ -79,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170123"
#define DRIVER_TIMESTAMP 1485156432
#define DRIVER_DATE "20170306"
#define DRIVER_TIMESTAMP 1488785683
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -293,6 +293,7 @@ enum plane_id {
PLANE_PRIMARY,
PLANE_SPRITE0,
PLANE_SPRITE1,
PLANE_SPRITE2,
PLANE_CURSOR,
I915_MAX_PLANES,
};
@@ -343,6 +344,11 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -384,6 +390,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
#define HPD_STORM_DEFAULT_THRESHOLD 5
struct i915_hotplug {
struct work_struct hotplug_work;
@@ -407,6 +415,8 @@ struct i915_hotplug {
struct work_struct poll_init_work;
bool poll_enabled;
unsigned int hpd_storm_threshold;
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
@@ -494,7 +504,35 @@ struct i915_hotplug {
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
for_each_if ((1 << (domain)) & (mask))
for_each_if (BIT_ULL(domain) & (mask))
#define for_each_power_well(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
(__power_well) - (__dev_priv)->power_domains.power_wells < \
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_rev(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
(__power_well)--)
#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
for_each_power_well(__dev_priv, __power_well) \
for_each_if ((__power_well)->domains & (__domain_mask))
#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
for_each_power_well_rev(__dev_priv, __power_well) \
for_each_if ((__power_well)->domains & (__domain_mask))
#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
(plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
(__i)++) \
for_each_if (plane_state)
struct drm_i915_private;
struct i915_mm_struct;
@@ -600,9 +638,13 @@ struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;
struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
@@ -617,7 +659,6 @@ struct drm_i915_display_funcs {
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -636,7 +677,8 @@ struct drm_i915_display_funcs {
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -856,6 +898,7 @@ enum intel_platform {
INTEL_BROXTON,
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
INTEL_MAX_PLATFORMS
};
struct intel_device_info {
@@ -890,7 +933,7 @@ struct intel_device_info {
struct intel_display_error_state;
struct drm_i915_error_state {
struct i915_gpu_state {
struct kref ref;
struct timeval time;
struct timeval boottime;
@@ -900,16 +943,20 @@ struct drm_i915_error_state {
char error_msg[128];
bool simulated;
bool awake;
bool wakelock;
bool suspended;
int iommu;
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
struct i915_params params;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 gtier[4];
u32 gtier[4], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -923,6 +970,7 @@ struct drm_i915_error_state {
u32 gab_ctl;
u32 gfx_mode;
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
@@ -970,6 +1018,16 @@ struct drm_i915_error_state {
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
u32 handle;
u32 hw_id;
int ban_score;
int active;
int guilty;
} context;
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
@@ -1003,10 +1061,6 @@ struct drm_i915_error_state {
u32 pp_dir_base;
};
} vm_info;
pid_t pid;
char comm[TASK_COMM_LEN];
int context_bans;
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
@@ -1395,7 +1449,7 @@ struct i915_power_well {
int count;
/* cached hw enabled state */
bool hw_enabled;
unsigned long domains;
u64 domains;
/* unique identifier for this power well */
unsigned long id;
/*
@@ -1456,7 +1510,7 @@ struct i915_gem_mm {
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
phys_addr_t stolen_base; /* limited to low memory (32-bit) */
dma_addr_t stolen_base; /* limited to low memory (32-bit) */
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1498,11 +1552,6 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
struct i915_error_state_file_priv {
struct drm_i915_private *i915;
struct drm_i915_error_state *error;
};
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
@@ -1519,7 +1568,7 @@ struct i915_gpu_error {
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
struct i915_gpu_state *first_error;
unsigned long missed_irq_rings;
@@ -2053,6 +2102,10 @@ struct i915_oa_ops {
bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
};
struct intel_cdclk_state {
unsigned int cdclk, vco, ref;
};
struct drm_i915_private {
struct drm_device drm;
@@ -2063,8 +2116,6 @@ struct drm_i915_private {
const struct intel_device_info info;
int relative_constants_mode;
void __iomem *regs;
struct intel_uncore uncore;
@@ -2157,13 +2208,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
unsigned int cdclk_freq, max_cdclk_freq;
/*
* For reading holding any crtc lock is sufficient,
* for writing must hold all of them.
*/
unsigned int atomic_cdclk_freq;
unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
@@ -2171,8 +2216,22 @@ struct drm_i915_private {
unsigned int czclk_freq;
struct {
unsigned int vco, ref;
} cdclk_pll;
/*
* The current logical cdclk state.
* See intel_atomic_state.cdclk.logical
*
* For reading holding any crtc lock is sufficient,
* for writing must hold all of them.
*/
struct intel_cdclk_state logical;
/*
* The current actual cdclk state.
* See intel_atomic_state.cdclk.actual
*/
struct intel_cdclk_state actual;
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
} cdclk;
/**
* wq - Driver workqueue for GEM.
@@ -2752,6 +2811,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_REVID(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2767,8 +2832,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2810,9 +2876,7 @@ intel_info(const struct drm_i915_private *dev_priv)
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
(IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
IS_SKL_GT3(dev_priv) || \
IS_SKL_GT4(dev_priv))
(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2952,6 +3016,9 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
int intel_engines_init_early(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
@@ -3129,6 +3196,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_load_init(struct drm_i915_private *dev_priv);
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
@@ -3341,15 +3409,17 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_fault *vmf);
@@ -3543,7 +3613,7 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
const struct i915_error_state_file_priv *error);
const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
struct drm_i915_private *i915,
size_t count, loff_t pos);
@@ -3552,13 +3622,28 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_i915_private *dev_priv);
static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
kref_get(&gpu->ref);
return gpu;
}
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
if (gpu)
kref_put(&gpu->ref, __i915_gpu_state_free);
}
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
#else
@@ -3568,7 +3653,13 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
{
}
static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
return NULL;
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@@ -3708,7 +3799,7 @@ extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
@@ -3724,7 +3815,6 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
@@ -3734,7 +3824,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
@@ -3953,14 +4043,34 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
u32 seqno;
/* Note that the engine may have wrapped around the seqno, and
* so our request->global_seqno will be ahead of the hardware,
* even though it completed the request before wrapping. We catch
* this by kicking all the waiters before resetting the seqno
* in hardware, and also signal the fence.
*/
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
return true;
/* The request was dequeued before we were awoken. We check after
* inspecting the hw to confirm that this was the same request
* that generated the HWS update. The memory barriers within
* the request execution are sufficient to ensure that a check
* after reading the value from hw matches this request.
*/
seqno = i915_gem_request_global_seqno(req);
if (!seqno)
return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
if (__i915_gem_request_completed(req))
if (__i915_gem_request_completed(req, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3975,9 +4085,9 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
* is woken.
*/
if (engine->irq_seqno_barrier &&
rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
struct task_struct *tsk;
test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
/* The ordering of irq_posted versus applying the barrier
* is crucial. The clearing of the current irq_posted must
@@ -3999,19 +4109,18 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
* the seqno before we believe it coherent since they see
* irq_posted == false but we are still running).
*/
rcu_read_lock();
tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
if (tsk && tsk != current)
spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_wait && b->irq_wait->tsk != current)
/* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will
* be woken by whomever made the change. We only have
* to worry about when we steal the irq-posted for
* ourself.
*/
wake_up_process(tsk);
rcu_read_unlock();
wake_up_process(b->irq_wait->tsk);
spin_unlock_irqrestore(&b->irq_lock, flags);
if (__i915_gem_request_completed(req))
if (__i915_gem_request_completed(req, seqno))
return true;
}
@@ -4042,4 +4151,10 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
{
return (obj->cache_level != I915_CACHE_NONE ||
HAS_LLC(to_i915(obj->base.dev)));
}
#endif


@@ -29,12 +29,14 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -47,18 +49,12 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
{
return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return false;
if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
if (!i915_gem_object_is_coherent(obj))
return true;
return obj->pin_display;
@ -254,7 +250,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
if (needs_clflush &&
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
!i915_gem_object_is_coherent(obj))
drm_clflush_sg(pages);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@ -312,6 +308,8 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.release = i915_gem_object_release_phys,
};
static const struct drm_i915_gem_object_ops i915_gem_object_ops;
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@ -399,7 +397,7 @@ out:
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
i915_gem_request_retire_upto(rq);
if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
/* The GPU is now idle and this client has stalled.
* Since no other client has submitted a request in the
* meantime, assume that this client is the only one
@ -424,7 +422,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
long timeout,
struct intel_rps_client *rps)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
if (flags & I915_WAIT_ALL) {
struct dma_fence **shared;
@ -449,15 +449,26 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
prune_fences = count && timeout >= 0;
} else {
excl = reservation_object_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
if (excl && timeout >= 0) {
timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
prune_fences = timeout >= 0;
}
dma_fence_put(excl);
if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
reservation_object_lock(resv, NULL);
if (!__read_seqcount_retry(&resv->seq, seq))
reservation_object_add_excl_fence(resv, NULL);
reservation_object_unlock(resv);
}
return timeout;
}
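The opportunistic fence pruning above leans on the seqcount read-retry idiom; a minimal standalone sketch of that idiom (illustrative only, not taken from this patch, and using the non-underscored seqcount helpers) looks like this:

#include <linux/seqlock.h>

static seqcount_t sc = SEQCNT_ZERO(sc);
static int shared_value;

static int read_stable_value(void)
{
	unsigned int seq;
	int v;

	do {
		seq = read_seqcount_begin(&sc);	/* snapshot write generation */
		v = READ_ONCE(shared_value);	/* speculative, lock-free read */
	} while (read_seqcount_retry(&sc, seq));	/* retry if a writer ran */

	return v;
}

The wait path uses the same shape: sample the seqcount before the unlocked waits, and only take the reservation lock to prune if no writer intervened.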
@ -585,9 +596,18 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_pin_pages(obj);
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err_xfer;
return 0;
err_xfer:
obj->ops = &i915_gem_object_ops;
return ret;
}
static int
@ -608,7 +628,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(to_i915(obj->base.dev));
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
intel_fb_obj_flush(obj, ORIGIN_CPU);
return 0;
}
@ -771,8 +791,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
*needs_clflush = !i915_gem_object_is_coherent(obj);
if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, false);
@ -828,8 +847,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* before writing.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
*needs_clflush |= !i915_gem_object_is_coherent(obj);
if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, true);
@ -1257,7 +1275,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
intel_fb_obj_flush(obj, ORIGIN_CPU);
mutex_lock(&i915->drm.struct_mutex);
out_unpin:
@ -1393,7 +1411,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
intel_fb_obj_flush(obj, ORIGIN_CPU);
i915_gem_obj_finish_shmem_access(obj);
return ret;
}
@ -1596,23 +1614,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
int err = 0;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Pinned buffers may be scanout, so flush the cache */
if (READ_ONCE(obj->pin_display)) {
err = i915_mutex_lock_interruptible(dev);
if (!err) {
i915_gem_object_flush_cpu_write_domain(obj);
mutex_unlock(&dev->struct_mutex);
}
}
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
return err;
return 0;
}
/**
@ -2223,17 +2234,17 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
static void i915_sg_trim(struct sg_table *orig_st)
static bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
unsigned int i;
if (orig_st->nents == orig_st->orig_nents)
return;
return false;
if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return;
return false;
new_sg = new_st.sgl;
for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
@ -2246,6 +2257,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
sg_free_table(orig_st);
*orig_st = new_st;
return true;
}
static struct sg_table *
@ -2596,7 +2608,8 @@ static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
struct drm_i915_gem_request *request, *active = NULL;
unsigned long flags;
/* We are called by the error capture and reset at a random
* point in time. In particular, note that neither is crucially
@ -2606,15 +2619,22 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link) {
if (__i915_gem_request_completed(request))
if (__i915_gem_request_completed(request,
request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
return request;
}
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&request->fence.flags));
return NULL;
active = request;
break;
}
spin_unlock_irqrestore(&engine->timeline->lock, flags);
return active;
}
static bool engine_stalled(struct intel_engine_cs *engine)
@ -2641,7 +2661,30 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *request;
/* Prevent the signaler thread from updating the request
* state (by calling dma_fence_signal) as we are processing
* the reset. The write from the GPU of the seqno is
* asynchronous and the signaler thread may see a different
* value to us and declare the request complete, even though
* the reset routine has picked that request as the active
* (incomplete) request. This conflict is not handled
* gracefully!
*/
kthread_park(engine->breadcrumbs.signaler);
/* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its engine->irq_tasklet *just* as we are
* calling engine->init_hw() and also writing the ELSP.
* Turning off the engine->irq_tasklet until the reset is over
* prevents the race.
*/
tasklet_kill(&engine->irq_tasklet);
tasklet_disable(&engine->irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
if (engine_stalled(engine)) {
request = i915_gem_find_active_request(engine);
@ -2739,9 +2782,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
request = i915_gem_find_active_request(engine);
if (request && i915_gem_reset_request(request)) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@ -2756,7 +2796,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
engine->reset_hw(engine, request);
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@ -2765,8 +2805,14 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
i915_gem_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id)
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
i915_gem_reset_engine(engine);
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
}
i915_gem_restore_fences(dev_priv);
@ -2778,6 +2824,19 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
}
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
tasklet_enable(&engine->irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
}
}
static void nop_submit_request(struct drm_i915_gem_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
@ -2900,8 +2959,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
* new request is submitted.
*/
wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
intel_execlists_idle(dev_priv), 10);
intel_engines_are_idle(dev_priv),
10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
@ -2926,11 +2985,13 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (dev_priv->gt.active_requests)
goto out_unlock;
if (wait_for(intel_execlists_idle(dev_priv), 10))
if (wait_for(intel_engines_are_idle(dev_priv), 10))
DRM_ERROR("Timeout waiting for engines to idle\n");
for_each_engine(engine, dev_priv, id)
for_each_engine(engine, dev_priv, id) {
intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
}
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
@ -3029,6 +3090,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout_ns < 0)
args->timeout_ns = 0;
/*
* Apparently ktime isn't accurate enough and occasionally has a
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
* things up to make the test happy. We allow up to 1 jiffy.
*
* This is a regression from the timespec->ktime conversion.
*/
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
args->timeout_ns = 0;
}
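To see why one jiffy of slack suffices, consider a userspace-style model of the conversion (the HZ value here is assumed for illustration; the kernel's nsecs_to_jiffies() rounds similarly):

#include <stdio.h>

#define HZ 100				/* assumed tick rate for this example */
#define NSEC_PER_JIFFY (1000000000ULL / HZ)

int main(void)
{
	long long timeout_ns = 3000000;	/* 3 ms left over, under one 10 ms jiffy */

	/* A residue smaller than a jiffy converts to zero jiffies, so any
	 * further wait would be a no-op; report 0 as the code above does. */
	if (timeout_ns / NSEC_PER_JIFFY == 0)
		timeout_ns = 0;

	printf("remaining: %lld ns\n", timeout_ns);
	return 0;
}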
i915_gem_object_put(obj);
@ -3071,41 +3142,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
return 0;
}
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (!obj->mm.pages)
return;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen || obj->phys_handle)
return;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
* flushed/invalidated. As we always have to emit invalidations
* and flushes when moving into and out of the RENDER domain, correct
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
obj->cache_dirty = true;
return;
}
trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@ -3134,12 +3170,9 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
I915_GEM_DOMAIN_GTT);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@ -3149,13 +3182,27 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj, obj->pin_display);
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
I915_GEM_DOMAIN_CPU);
}
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
return;
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->base.write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
if (!READ_ONCE(obj->pin_display))
return;
mutex_lock(&obj->base.dev->struct_mutex);
__i915_gem_object_flush_for_display(obj);
mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
@ -3169,7 +3216,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@ -3207,9 +3253,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
@ -3221,10 +3264,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->mm.dirty = true;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
i915_gem_object_unpin_pages(obj);
return 0;
}
@ -3349,7 +3388,7 @@ restart:
}
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
i915_gem_object_is_coherent(obj))
obj->cache_dirty = true;
list_for_each_entry(vma, &obj->vma_list, obj_link)
@ -3461,7 +3500,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
u32 old_read_domains, old_write_domain;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@ -3521,24 +3559,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj, true);
intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
}
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
__i915_gem_object_flush_for_display(obj);
intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
return vma;
err_unpin_display:
@ -3574,7 +3602,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@ -3593,13 +3620,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, false);
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
@ -3616,10 +3639,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
return 0;
}
@ -3647,16 +3666,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
/*
* Note that the request might not have been submitted yet.
* In which case emitted_jiffies will be zero.
*/
if (!request->emitted_jiffies)
continue;
if (target) {
list_del(&target->client_link);
target->file_priv = NULL;
}
target = request;
}
@ -3942,7 +3959,7 @@ frontbuffer_retire(struct i915_gem_active *active,
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
intel_fb_obj_flush(obj, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@ -4203,11 +4220,29 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
!i915_gem_context_is_kernel(engine->last_retired_context));
}
void i915_gem_sanitize(struct drm_i915_private *i915)
{
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
* try to take over. The only way to remove the earlier state
* is by resetting. However, resetting on earlier gen is tricky as
* it may impact the display and we are uncertain about the stability
* of the reset, so we only reset recent machines with logical
* context support (that must be reset to remove any stray contexts).
*/
if (HAS_HW_CONTEXTS(i915)) {
int reset = intel_gpu_reset(i915, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
int ret;
intel_runtime_pm_get(dev_priv);
intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
@ -4222,13 +4257,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
*/
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
goto err;
goto err_unlock;
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
goto err;
goto err_unlock;
i915_gem_retire_requests(dev_priv);
GEM_BUG_ON(dev_priv->gt.active_requests);
@ -4252,7 +4287,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
WARN_ON(!intel_execlists_idle(dev_priv));
WARN_ON(!intel_engines_are_idle(dev_priv));
/*
* Neither the BIOS, ourselves nor any other kernel
@ -4273,15 +4308,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
if (HAS_HW_CONTEXTS(dev_priv)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
i915_gem_sanitize(dev_priv);
goto out_rpm_put;
return 0;
err:
err_unlock:
mutex_unlock(&dev->struct_mutex);
out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}
@ -4351,11 +4384,24 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
}
int
i915_gem_init_hw(struct drm_i915_private *dev_priv)
static int __i915_gem_restart_engines(void *data)
{
struct drm_i915_private *i915 = data;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
for_each_engine(engine, i915, id) {
err = engine->init_hw(engine);
if (err)
return err;
}
return 0;
}
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
int ret;
dev_priv->gt.last_init_time = ktime_get();
@ -4401,11 +4447,9 @@ i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
/* Need to do basic initialisation of all rings first: */
for_each_engine(engine, dev_priv, id) {
ret = engine->init_hw(engine);
if (ret)
goto out;
}
ret = __i915_gem_restart_engines(dev_priv);
if (ret)
goto out;
intel_mocs_init_l3cc_table(dev_priv);
@ -4446,6 +4490,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_clflush_init(dev_priv);
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
@ -4494,6 +4540,11 @@ out_unlock:
return ret;
}
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
i915_gem_sanitize(i915);
}
void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
@ -4583,8 +4634,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
@ -4609,7 +4658,9 @@ err_out:
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
WARN_ON(dev_priv->mm.object_count);
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
@ -4627,14 +4678,10 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_shrink_all(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
}
@ -4685,7 +4732,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list)
list_for_each_entry(request, &file_priv->mm.request_list, client_link)
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
@ -4949,3 +4996,11 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
sg = i915_gem_object_get_sg(obj, n, &offset);
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif


@ -28,9 +28,18 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#define GEM_WARN_ON(expr) WARN_ON(expr)
#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#else
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
#endif
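A rough illustration of how the debug-only helpers above are intended to be used (hypothetical function, not from this patch; the declaration and checks compile away entirely when CONFIG_DRM_I915_DEBUG_GEM is not set):

static void track_usage(void)
{
	GEM_DEBUG_DECL(unsigned int nr_calls = 0);	/* declared only in debug builds */

	GEM_DEBUG_EXEC(nr_calls++);			/* executed only in debug builds */
	GEM_DEBUG_BUG_ON(nr_calls != 1);		/* compiled away otherwise */
}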
#define I915_NUM_ENGINES 5


@ -122,9 +122,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (tmp->base.size >= size) {
/* Clear the set of shared fences early */
ww_mutex_lock(&tmp->resv->lock, NULL);
reservation_object_lock(tmp->resv, NULL);
reservation_object_add_excl_fence(tmp->resv, NULL);
ww_mutex_unlock(&tmp->resv->lock);
reservation_object_unlock(tmp->resv);
obj = tmp;
break;


@ -0,0 +1,189 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"
static DEFINE_SPINLOCK(clflush_lock);
static u64 clflush_context;
struct clflush {
struct dma_fence dma; /* Must be first for dma_fence_free() */
struct i915_sw_fence wait;
struct work_struct work;
struct drm_i915_gem_object *obj;
};
static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
return DRIVER_NAME;
}
static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
return "clflush";
}
static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
return true;
}
static void i915_clflush_release(struct dma_fence *fence)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
i915_sw_fence_fini(&clflush->wait);
BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
dma_fence_free(&clflush->dma);
}
static const struct dma_fence_ops i915_clflush_ops = {
.get_driver_name = i915_clflush_get_driver_name,
.get_timeline_name = i915_clflush_get_timeline_name,
.enable_signaling = i915_clflush_enable_signaling,
.wait = dma_fence_default_wait,
.release = i915_clflush_release,
};
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false;
intel_fb_obj_flush(obj, ORIGIN_CPU);
}
static void i915_clflush_work(struct work_struct *work)
{
struct clflush *clflush = container_of(work, typeof(*clflush), work);
struct drm_i915_gem_object *obj = clflush->obj;
if (!obj->cache_dirty)
goto out;
if (i915_gem_object_pin_pages(obj)) {
DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
goto out;
}
__i915_do_clflush(obj);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
dma_fence_signal(&clflush->dma);
dma_fence_put(&clflush->dma);
}
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
switch (state) {
case FENCE_COMPLETE:
schedule_work(&clflush->work);
break;
case FENCE_FREE:
dma_fence_put(&clflush->dma);
break;
}
return NOTIFY_DONE;
}
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct clflush *clflush;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
* Similarly, we only access struct pages through the CPU cache, so
* anything not backed by physical memory we consider to be always
* coherent and to need no clflushing.
*/
if (!i915_gem_object_has_struct_page(obj))
return;
obj->cache_dirty = true;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
* flushed/invalidated. As we always have to emit invalidations
* and flushes when moving into and out of the RENDER domain, correct
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
return;
trace_i915_gem_object_clflush(obj);
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
if (clflush) {
dma_fence_init(&clflush->dma,
&i915_clflush_ops,
&clflush_lock,
clflush_context,
0);
i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
clflush->obj = i915_gem_object_get(obj);
INIT_WORK(&clflush->work, i915_clflush_work);
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &clflush->dma);
reservation_object_unlock(obj->resv);
i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
} else {
GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
}
}
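How a caller might drive the new interface, sketched under the flag semantics defined in i915_gem_clflush.h (illustrative call sites only, not lifted from this patch):

static void example_flushes(struct drm_i915_gem_object *obj)
{
	/* Synchronous flush, e.g. on the set-to-CPU-domain path: */
	i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);

	/* Forced flush even for snooped objects, e.g. for scanout: */
	i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);

	/* Asynchronous flush, tracked as an exclusive fence on obj->resv: */
	i915_gem_clflush_object(obj, 0);
}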
void i915_gem_clflush_init(struct drm_i915_private *i915)
{
clflush_context = dma_fence_context_alloc(1);
}


@ -0,0 +1,37 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_GEM_CLFLUSH_H__
#define __I915_GEM_CLFLUSH_H__
struct drm_i915_private;
struct drm_i915_gem_object;
void i915_gem_clflush_init(struct drm_i915_private *i915);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
#define I915_CLFLUSH_SYNC BIT(1)
#endif /* __I915_GEM_CLFLUSH_H__ */


@ -92,21 +92,6 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
static int get_context_size(struct drm_i915_private *dev_priv)
{
int ret;
@ -236,6 +221,30 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
return 0;
}
static u32 default_desc_template(const struct drm_i915_private *i915,
const struct i915_hw_ppgtt *ppgtt)
{
u32 address_mode;
u32 desc;
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(i915))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers
* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
*/
return desc;
}
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@ -257,8 +266,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
ctx->ggtt_alignment = get_context_alignment(dev_priv);
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@ -309,8 +316,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
@ -332,6 +339,13 @@ err_out:
return ERR_PTR(ret);
}
static void __destroy_hw_context(struct i915_gem_context *ctx,
struct drm_i915_file_private *file_priv)
{
idr_remove(&file_priv->context_idr, ctx->user_handle);
context_close(ctx);
}
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
@ -356,12 +370,12 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
idr_remove(&file_priv->context_idr, ctx->user_handle);
context_close(ctx);
__destroy_hw_context(ctx, file_priv);
return ERR_CAST(ppgtt);
}
ctx->ppgtt = ppgtt;
ctx->desc_template = default_desc_template(dev_priv, ppgtt);
}
trace_i915_context_create(ctx);
@ -400,7 +414,8 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
if (!i915.enable_guc_submission)
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
@ -451,6 +466,11 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
return PTR_ERR(ctx);
}
/* For easy recognisability, we want the kernel context to be 0 and then
* all user contexts will have non-zero hw_id.
*/
GEM_BUG_ON(ctx->hw_id);
i915_gem_context_clear_bannable(ctx);
ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
@ -560,27 +580,15 @@ static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915.semaphores ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(dev_priv)) {
ret = engine->emit_flush(req, EMIT_INVALIDATE);
if (ret)
return ret;
}
int len;
/* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
@ -593,99 +601,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
cs = intel_ring_begin(req, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring,
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
*cs++ = _MASKED_BIT_ENABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring,
i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
*cs++ = MI_NOOP;
if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
intel_ring_emit_reg(ring, last_reg);
intel_ring_emit(ring,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = _MASKED_BIT_DISABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
intel_ring_emit(ring,
MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, last_reg);
intel_ring_emit(ring,
i915_ggtt_offset(engine->scratch));
intel_ring_emit(ring, MI_NOOP);
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = i915_ggtt_offset(engine->scratch);
*cs++ = MI_NOOP;
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
}
intel_ring_advance(ring);
intel_ring_advance(req, cs);
return ret;
return 0;
}
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
struct intel_ring *ring = req->ring;
int i, ret;
u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
return 0;
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (ret)
return ret;
cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i]);
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
*cs++ = remap_info[i];
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
@ -1014,8 +1015,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
idr_remove(&file_priv->context_idr, ctx->user_handle);
context_close(ctx);
__destroy_hw_context(ctx, file_priv);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
@ -1164,3 +1164,8 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif


@ -140,8 +140,6 @@ struct i915_gem_context {
*/
int priority;
/** ggtt_alignment: alignment restriction for context objects */
u32 ggtt_alignment;
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;


@ -307,3 +307,8 @@ fail_detach:
return ERR_PTR(ret);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif


@ -258,6 +258,9 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
int ret = 0;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
trace_i915_gem_evict_node(vm, target, flags);
/* Retire before we search the active list. Although we have
@ -271,11 +274,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
check_color = vm->mm.color_adjust;
if (check_color) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start > vm->start)
if (start)
start -= I915_GTT_PAGE_SIZE;
if (end < vm->start + vm->total)
end += I915_GTT_PAGE_SIZE;
/* Always look at the page afterwards to avoid the end-of-GTT */
end += I915_GTT_PAGE_SIZE;
}
GEM_BUG_ON(start >= end);
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
/* If we find any non-objects (!vma), we cannot evict them */
@ -284,6 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
GEM_BUG_ON(!node->allocated);
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@ -387,3 +393,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif


@ -28,12 +28,14 @@
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@ -1110,13 +1112,18 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
continue;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj, 0);
obj->base.write_domain = 0;
}
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
}
/* Unconditionally flush any chipset caches (for streaming writes). */
@ -1297,12 +1304,12 @@ static void eb_export_fence(struct drm_i915_gem_object *obj,
* handle an error right now. Worst case should be missed
* synchronisation leading to rendering corruption.
*/
ww_mutex_lock(&resv->lock, NULL);
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &req->fence);
else if (reservation_object_reserve_shared(resv) == 0)
reservation_object_add_shared_fence(resv, &req->fence);
ww_mutex_unlock(&resv->lock);
reservation_object_unlock(resv);
}
static void
@ -1313,8 +1320,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain)
@ -1325,32 +1330,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
eb_export_fence(obj, req, vma->exec_entry->flags);
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
int ret, i;
u32 *cs;
int i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;
cs = intel_ring_begin(req, 4 * 3);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
*cs++ = 0;
}
intel_ring_advance(ring);
intel_ring_advance(req, cs);
return 0;
}
@ -1403,15 +1407,20 @@ out:
return vma;
}
static void
add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
req->file_priv = file->driver_priv;
list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}
static int
execbuf_submit(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@ -1422,56 +1431,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && params->engine->id != RCS) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (instp_mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev_priv)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
return -EINVAL;
}
if (INTEL_INFO(dev_priv)->gen > 5 &&
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
}
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev_priv)->gen >= 6)
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
if (args->flags & I915_EXEC_CONSTANTS_MASK) {
DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
return -EINVAL;
}
if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
struct intel_ring *ring = params->request->ring;
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = instp_mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
@ -1491,8 +1455,6 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
return 0;
@ -1591,6 +1553,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
int out_fence_fd = -1;
int ret;
bool need_relocs;
@ -1634,6 +1599,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags |= I915_DISPATCH_RS;
}
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
return -EINVAL;
}
if (args->flags & I915_EXEC_FENCE_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
goto err_in_fence;
}
}
/* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process. Upon first dispatch, we acquire another prolonged
@ -1778,6 +1757,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err_batch_unpin;
}
if (in_fence) {
ret = i915_gem_request_await_dma_fence(params->request,
in_fence);
if (ret < 0)
goto err_request;
}
if (out_fence_fd != -1) {
out_fence = sync_file_create(&params->request->fence);
if (!out_fence) {
ret = -ENOMEM;
goto err_request;
}
}
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the batch_obj be moved onto the
@ -1786,10 +1780,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->request->batch = params->batch;
ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
goto err_request;
/*
* Save assorted stuff away to pass through to *_submission().
* NB: This data should be 'persistent' and not local as it will
@ -1802,9 +1792,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->ctx = ctx;
trace_i915_gem_request_queue(params->request, dispatch_flags);
ret = execbuf_submit(params, args, &eb->vmas);
err_request:
__i915_add_request(params->request, ret == 0);
add_to_client(params->request, file);
if (out_fence) {
if (ret == 0) {
fd_install(out_fence_fd, out_fence->file);
args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
args->rsvd2 |= (u64)out_fence_fd << 32;
out_fence_fd = -1;
} else {
fput(out_fence->file);
}
}
err_batch_unpin:
/*
@ -1826,6 +1830,10 @@ pre_mutex_err:
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
err_in_fence:
dma_fence_put(in_fence);
return ret;
}
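From userspace, the new flags pair with the rsvd2 packing handled above. A hedged sketch of a submission helper follows; the DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl name, headers and fd handling are assumptions about the uAPI, not taken from this diff:

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

int submit_with_fences(int drm_fd, struct drm_i915_gem_execbuffer2 *eb,
		       int in_fence_fd)
{
	eb->flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
	eb->rsvd2 = (uint32_t)in_fence_fd;	/* in-fence fd in the low 32 bits */

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, eb))
		return -1;

	return (int)(eb->rsvd2 >> 32);		/* out-fence fd; caller close()s it */
}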
@ -1933,11 +1941,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
if (args->rsvd2 != 0) {
DRM_DEBUG("dirty rvsd2 field\n");
return -EINVAL;
}
exec2_list = drm_malloc_gfp(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);

The diff for this file is not shown because of its large size.


@ -36,9 +36,11 @@
#include <linux/io-mapping.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
@ -51,11 +53,11 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
@ -67,7 +69,7 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
@ -99,13 +101,20 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
/* GEN8 legacy style address is defined as a 3 level page table:
/* GEN8 32b style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference compared to a normal x86 3-level page table is that the PDPEs are
* programmed via register.
*
* GEN8 48b legacy style address is defined as a 4 level page table:
*/
#define GEN8_3LVL_PDPES 4
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
/* GEN8 48b style address is defined as a 4 level page table:
* 47:39 | 38:30 | 29:21 | 20:12 | 11:0
* PML4E | PDPE | PDE | PTE | offset
*/
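A standalone model of the 4-level decomposition described above (hypothetical helper, using the 9-bit index fields and 12-bit page offset from the comment):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000123456789abcULL;

	unsigned int pml4e = (addr >> 39) & 0x1ff;	/* bits 47:39 */
	unsigned int pdpe  = (addr >> 30) & 0x1ff;	/* bits 38:30 */
	unsigned int pde   = (addr >> 21) & 0x1ff;	/* bits 29:21 */
	unsigned int pte   = (addr >> 12) & 0x1ff;	/* bits 20:12 */
	unsigned int off   = addr & 0xfff;		/* bits 11:0  */

	printf("pml4e=%u pdpe=%u pde=%u pte=%u offset=0x%x\n",
	       pml4e, pdpe, pde, pte, off);
	return 0;
}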
@ -116,15 +125,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
* tables */
#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@ -141,7 +141,7 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
struct sg_table;
@ -208,7 +208,7 @@ struct i915_page_dma {
/* For gen6/gen7 only. This is the offset in the GGTT
* where the page directory entries for PPGTT begin
*/
uint32_t ggtt_offset;
u32 ggtt_offset;
};
};
@ -218,28 +218,24 @@ struct i915_page_dma {
struct i915_page_table {
struct i915_page_dma base;
unsigned long *used_ptes;
unsigned int used_ptes;
};
struct i915_page_directory {
struct i915_page_dma base;
unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
unsigned int used_pdes;
};
struct i915_page_directory_pointer {
struct i915_page_dma base;
unsigned long *used_pdpes;
struct i915_page_directory **page_directory;
unsigned int used_pdpes;
};
struct i915_pml4 {
struct i915_page_dma base;
DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
@ -247,6 +243,7 @@ struct i915_address_space {
struct drm_mm mm;
struct i915_gem_timeline timeline;
struct drm_i915_private *i915;
struct device *dma;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
* principle, no information should leak from one context to another
@ -257,7 +254,6 @@ struct i915_address_space {
*/
struct drm_i915_file_private *file;
struct list_head global_link;
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
bool closed;
@ -297,6 +293,9 @@ struct i915_address_space {
*/
struct list_head unbound_list;
struct pagevec free_pages;
bool pt_kmap_wc;
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
@ -304,20 +303,19 @@ struct i915_address_space {
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length);
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length);
u64 start, u64 length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
uint64_t offset,
u64 offset,
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
u64 start,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
@ -326,10 +324,18 @@ struct i915_address_space {
int (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
};
#define i915_is_ggtt(V) (!(V)->file)
static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
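The predicate works because only address spaces larger than 4 GiB leave bits set above bit 31 after the subtraction; as a quick worked check:

/* 48b space: total = 1ULL << 48, so (total - 1) >> 32 == 0xffff -> true  */
/* 32b space: total = 1ULL << 32, so (total - 1) >> 32 == 0x0    -> false */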
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@ -381,7 +387,6 @@ struct i915_hw_ppgtt {
gen6_pte_t __iomem *pd_addr;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
@ -409,9 +414,9 @@ struct i915_hw_ppgtt {
(pt = (pd)->page_table[iter], true); \
++iter)
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
const uint32_t mask = NUM_PTE(pde_shift) - 1;
const u32 mask = NUM_PTE(pde_shift) - 1;
return (address >> PAGE_SHIFT) & mask;
}
@ -420,11 +425,10 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
* does not cross a page table boundary, so the max value would be
* GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
*/
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
uint32_t pde_shift)
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
const uint64_t mask = ~((1ULL << pde_shift) - 1);
uint64_t end;
const u64 mask = ~((1ULL << pde_shift) - 1);
u64 end;
WARN_ON(length == 0);
WARN_ON(offset_in_page(addr|length));
@ -437,26 +441,35 @@ static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
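A worked model of that clamping behaviour (standalone sketch; GEN6 geometry is assumed here: 4 KiB pages and a 22-bit PDE span, i.e. 1024 PTEs per page table):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PDE_SHIFT  22					/* assumed: 4 MiB per PDE */
#define PTES_PER_PT (1u << (PDE_SHIFT - PAGE_SHIFT))	/* 1024 */

static uint32_t pte_index(uint64_t addr)
{
	return (addr >> PAGE_SHIFT) & (PTES_PER_PT - 1);
}

static uint32_t pte_count(uint64_t addr, uint64_t length)
{
	const uint64_t mask = ~((1ULL << PDE_SHIFT) - 1);
	const uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))		/* crosses a PDE boundary */
		return PTES_PER_PT - pte_index(addr);	/* clamp to this table */

	return pte_index(end) - pte_index(addr);
}

int main(void)
{
	/* 8 MiB starting 16 KiB below a 4 MiB boundary: clamped to 4 PTEs. */
	printf("%u\n", pte_count(0x400000 - 0x4000, 0x800000));
	return 0;
}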
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
static inline u32 i915_pde_index(u64 addr, u32 shift)
{
return (addr >> shift) & I915_PDE_MASK;
}
static inline uint32_t gen6_pte_index(uint32_t addr)
static inline u32 gen6_pte_index(u32 addr)
{
return i915_pte_index(addr, GEN6_PDE_SHIFT);
}
static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
static inline u32 gen6_pte_count(u32 addr, u32 length)
{
return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}
static inline uint32_t gen6_pde_index(uint32_t addr)
static inline u32 gen6_pde_index(u32 addr)
{
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
if (i915_vm_is_48bit(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;
}
/* Equivalent to the gen6 version. For each pde it iterates over every pde
* entry between start and start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory.
@ -471,7 +484,7 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \
length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
length > 0 && iter < i915_pdpes_per_pdp(vm) && \
(pd = (pdp)->page_directory[iter], true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \
@ -485,27 +498,27 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
static inline uint32_t gen8_pte_index(uint64_t address)
static inline u32 gen8_pte_index(u64 address)
{
return i915_pte_index(address, GEN8_PDE_SHIFT);
}
static inline uint32_t gen8_pde_index(uint64_t address)
static inline u32 gen8_pde_index(u64 address)
{
return i915_pde_index(address, GEN8_PDE_SHIFT);
}
static inline uint32_t gen8_pdpe_index(uint64_t address)
static inline u32 gen8_pdpe_index(u64 address)
{
return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}
static inline uint32_t gen8_pml4e_index(uint64_t address)
static inline u32 gen8_pml4e_index(u64 address)
{
return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
static inline u64 gen8_pte_count(u64 address, u64 length)
{
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
@ -513,9 +526,7 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
return test_bit(n, ppgtt->pdp.used_pdpes) ?
px_dma(ppgtt->pdp.page_directory[n]) :
px_dma(ppgtt->base.scratch_pd);
return px_dma(ppgtt->pdp.page_directory[n]);
}
static inline struct i915_ggtt *
@ -525,6 +536,9 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ggtt, base);
}
int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);

Просмотреть файл

@ -35,8 +35,10 @@ static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
for (sg = st->sgl; sg; sg = __sg_next(sg))
__free_pages(sg_page(sg), get_order(sg->length));
for (sg = st->sgl; sg; sg = __sg_next(sg)) {
if (sg_page(sg))
__free_pages(sg_page(sg), get_order(sg->length));
}
sg_free_table(st);
kfree(st);
@ -133,6 +135,7 @@ create_st:
return st;
err:
sg_set_page(sg, NULL, 0, 0);
sg_mark_end(sg);
internal_free_pages(st);
return ERR_PTR(-ENOMEM);


@ -33,6 +33,8 @@
#include <drm/i915_drm.h>
#include "i915_selftest.h"
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@ -84,6 +86,7 @@ struct drm_i915_gem_object {
struct list_head obj_exec_link;
struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
@ -162,19 +165,23 @@ struct drm_i915_gem_object {
struct reservation_object *resv;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
unsigned int framebuffer_references;
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
union {
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
struct work_struct *work;
} userptr;
unsigned long scratch;
};
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
@ -253,6 +260,16 @@ extern void drm_gem_object_unreference(struct drm_gem_object *);
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
reservation_object_lock(obj->resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
reservation_object_unlock(obj->resv);
}
static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
@ -299,6 +316,12 @@ i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->framebuffer_references);
}
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
@ -357,5 +380,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
return engine;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
#endif


@ -72,7 +72,6 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&req->submit);
i915_sw_fence_fini(&req->execute);
kmem_cache_free(req->i915->requests, req);
}
@ -86,42 +85,20 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
struct drm_i915_private *dev_private;
struct drm_i915_file_private *file_priv;
WARN_ON(!req || !file || req->file_priv);
if (!req || !file)
return -EINVAL;
if (req->file_priv)
return -EINVAL;
dev_private = req->i915;
file_priv = file->driver_priv;
spin_lock(&file_priv->mm.lock);
req->file_priv = file_priv;
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
struct drm_i915_file_private *file_priv = request->file_priv;
struct drm_i915_file_private *file_priv;
file_priv = request->file_priv;
if (!file_priv)
return;
spin_lock(&file_priv->mm.lock);
list_del(&request->client_list);
request->file_priv = NULL;
if (request->file_priv) {
list_del(&request->client_link);
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
}
@ -201,6 +178,92 @@ i915_priotree_init(struct i915_priotree *pt)
pt->priority = INT_MIN;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
return ret;
i915_gem_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
struct intel_timeline *tl = &timeline->engine[id];
if (wait_for(intel_engine_is_idle(engine), 50))
return -EBUSY;
if (!i915_seqno_passed(seqno, tl->seqno)) {
/* spin until threads are complete */
while (intel_breadcrumbs_busy(engine))
cond_resched();
}
/* Finally reset hw state */
tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
}
list_for_each_entry(timeline, &i915->gt.timelines, link) {
for_each_engine(engine, i915, id) {
struct intel_timeline *tl = &timeline->engine[id];
memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
}
}
return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
/* The HWS page needs to be set less than what we
* will inject into the ring
*/
return reset_all_global_seqno(dev_priv, seqno - 1);
}
static int reserve_seqno(struct intel_engine_cs *engine)
{
u32 active = ++engine->timeline->inflight_seqnos;
u32 seqno = engine->timeline->seqno;
int ret;
/* Reservation is fine until we need to wrap around */
if (likely(!add_overflows(seqno, active)))
return 0;
ret = reset_all_global_seqno(engine->i915, 0);
if (ret) {
engine->timeline->inflight_seqnos--;
return ret;
}
return 0;
}
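
The wrap test above uses the driver's add_overflows() helper; a minimal model, assuming the usual unsigned-wrap form of that check: with 'active' seqnos still in flight, allocating from 'seqno' is safe only while the u32 addition does not wrap.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed form of add_overflows(): unsigned addition wrapped iff the
 * result is smaller than either operand. */
static bool add_overflows(uint32_t a, uint32_t b)
{
	return a + b < a;
}

int main(void)
{
	assert(!add_overflows(100, 5));		/* plenty of seqno space left */
	assert(add_overflows(0xfffffffe, 4));	/* would wrap: reset seqnos first */
	return 0;
}
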
static void unreserve_seqno(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->timeline->inflight_seqnos);
engine->timeline->inflight_seqnos--;
}
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@ -214,7 +277,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
GEM_BUG_ON(!i915_gem_request_completed(request));
GEM_BUG_ON(!request->i915->gt.active_requests);
@ -240,6 +302,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
&request->i915->gt.idle_work,
msecs_to_jiffies(100));
}
unreserve_seqno(request->engine);
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@ -310,88 +373,9 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
} while (tmp != req);
}
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
static u32 timeline_get_seqno(struct intel_timeline *tl)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
return ret;
i915_gem_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
while (intel_breadcrumbs_busy(i915))
cond_resched(); /* spin until threads are complete */
}
atomic_set(&timeline->seqno, seqno);
/* Finally reset hw state */
for_each_engine(engine, i915, id)
intel_engine_init_global_seqno(engine, seqno);
list_for_each_entry(timeline, &i915->gt.timelines, link) {
for_each_engine(engine, i915, id) {
struct intel_timeline *tl = &timeline->engine[id];
memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
}
}
return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
/* The HWS page needs to be set less than what we
* will inject into the ring
*/
return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}
static int reserve_global_seqno(struct drm_i915_private *i915)
{
u32 active_requests = ++i915->gt.active_requests;
u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
int ret;
/* Reservation is fine until we need to wrap around */
if (likely(seqno + active_requests > seqno))
return 0;
ret = i915_gem_init_global_seqno(i915, 0);
if (ret) {
i915->gt.active_requests--;
return ret;
}
return 0;
}
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
/* seqno only incremented under a mutex */
return ++tl->seqno.counter;
}
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
return atomic_inc_return(&tl->seqno);
return ++tl->seqno;
}
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
@ -400,19 +384,19 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
struct intel_timeline *timeline;
u32 seqno;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
trace_i915_gem_request_execute(request);
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
assert_spin_locked(&timeline->lock);
seqno = timeline_get_seqno(timeline->common);
seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
request->previous_seqno = timeline->last_submitted_seqno;
timeline->last_submitted_seqno = seqno;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = seqno;
@ -420,7 +404,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
intel_engine_enable_signaling(request);
spin_unlock(&request->lock);
GEM_BUG_ON(!request->global_seqno);
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
@ -428,7 +411,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
i915_sw_fence_commit(&request->execute);
wake_up_all(&request->execute);
}
void i915_gem_request_submit(struct drm_i915_gem_request *request)
@ -444,6 +427,56 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
/* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
engine->timeline->seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = 0;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
intel_engine_cancel_signaling(request);
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
timeline = request->timeline;
GEM_BUG_ON(timeline == engine->timeline);
spin_lock(&timeline->lock);
list_move(&request->link, &timeline->requests);
spin_unlock(&timeline->lock);
/* We don't need to wake_up any waiters on request->execute, they
* will get woken by any other event or us re-adding this request
* to the engine timeline (__i915_gem_request_submit()). The waiters
* should be quite adept at finding that the request now has a new
* global_seqno compared to the one they went to sleep on.
*/
}
void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
__i915_gem_request_unsubmit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
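
A toy model of the submit/unsubmit seqno bookkeeping (illustrative only): submission hands out the next engine-timeline seqno, and preemption may only unwind the most recently submitted request, which is exactly what the GEM_BUG_ON() in __i915_gem_request_unsubmit() asserts.

#include <assert.h>
#include <stdint.h>

struct timeline { uint32_t seqno; };
struct request { uint32_t global_seqno; };

static void submit(struct timeline *tl, struct request *rq)
{
	rq->global_seqno = ++tl->seqno;		/* timeline_get_seqno() */
}

static void unsubmit(struct timeline *tl, struct request *rq)
{
	/* only the last submitted request may be unwound */
	assert(rq->global_seqno == tl->seqno);
	tl->seqno--;
	rq->global_seqno = 0;			/* no longer on the HW queue */
}

int main(void)
{
	struct timeline tl = { 0 };
	struct request a, b;

	submit(&tl, &a);
	submit(&tl, &b);
	unsubmit(&tl, &b);			/* reverse order: b before a */
	unsubmit(&tl, &a);
	assert(tl.seqno == 0);
	return 0;
}
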
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
@ -452,6 +485,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_gem_request_submit(request);
request->engine->submit_request(request);
break;
@ -463,24 +497,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct drm_i915_gem_request *request =
container_of(fence, typeof(*request), execute);
switch (state) {
case FENCE_COMPLETE:
break;
case FENCE_FREE:
i915_gem_request_put(request);
break;
}
return NOTIFY_DONE;
}
/**
* i915_gem_request_alloc - allocate a request structure
*
@ -517,14 +533,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
ret = reserve_global_seqno(dev_priv);
ret = reserve_seqno(engine);
if (ret)
goto err_unpin;
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
if (req && __i915_gem_request_completed(req))
if (req && i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@ -569,17 +585,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
&i915_fence_ops,
&req->lock,
req->timeline->fence_context,
__timeline_get_seqno(req->timeline->common));
timeline_get_seqno(req->timeline));
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
/* Ensure that the execute fence completes after the submit fence -
* as we complete the execute fence from within the submit fence
* callback, its completion would otherwise be visible first.
*/
i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
init_waitqueue_head(&req->execute);
i915_priotree_init(&req->priotree);
@ -614,6 +624,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*/
req->head = req->ring->tail;
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
return req;
err_ctx:
@ -624,7 +636,7 @@ err_ctx:
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
dev_priv->gt.active_requests--;
unreserve_seqno(engine);
err_unpin:
engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
@ -634,6 +646,7 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
u32 seqno;
int ret;
GEM_BUG_ON(to == from);
@ -656,14 +669,15 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret < 0 ? ret : 0;
}
if (!from->global_seqno) {
seqno = i915_gem_request_global_seqno(from);
if (!seqno) {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
GFP_KERNEL);
return ret < 0 ? ret : 0;
}
if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
if (seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@ -681,7 +695,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
to->timeline->sync_seqno[from->engine->id] = seqno;
return 0;
}
@ -827,6 +841,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@ -836,8 +851,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* our i915_gem_request_alloc() and called __i915_add_request() before
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
request->fence.seqno));
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
/*
* To ensure that this call will not fail, space for its emissions
@ -865,10 +879,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
GEM_BUG_ON(err);
request->postfix = ring->tail;
ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@ -892,16 +905,14 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
request->fence.seqno));
timeline->last_submitted_seqno = request->fence.seqno;
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
i915_gem_mark_busy(engine);
if (!request->i915->gt.active_requests++)
i915_gem_mark_busy(engine);
/* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@ -921,16 +932,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
@ -964,9 +965,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
int state, unsigned long timeout_us)
u32 seqno, int state, unsigned long timeout_us)
{
unsigned int cpu;
struct intel_engine_cs *engine = req->engine;
unsigned int irq, cpu;
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
@ -978,11 +980,24 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
* takes to sleep on a request, on the order of a microsecond.
*/
irq = atomic_read(&engine->irq_count);
timeout_us += local_clock_us(&cpu);
do {
if (__i915_gem_request_completed(req))
if (seqno != i915_gem_request_global_seqno(req))
break;
if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
seqno))
return true;
/* Seqnos are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fix up coherency.
*/
if (atomic_read(&engine->irq_count) != irq)
break;
if (signal_pending_state(state, current))
break;
@ -995,52 +1010,14 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
unsigned int flags,
long timeout)
static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
DEFINE_WAIT(reset);
DEFINE_WAIT(wait);
if (likely(!i915_reset_in_progress(&request->i915->gpu_error)))
return false;
if (flags & I915_WAIT_LOCKED)
add_wait_queue(q, &reset);
do {
prepare_to_wait(&request->execute.wait, &wait, state);
if (i915_sw_fence_done(&request->execute))
break;
if (flags & I915_WAIT_LOCKED &&
i915_reset_in_progress(&request->i915->gpu_error)) {
__set_current_state(TASK_RUNNING);
i915_reset(request->i915);
reset_wait_queue(q, &reset);
continue;
}
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
if (!timeout) {
timeout = -ETIME;
break;
}
timeout = io_schedule_timeout(timeout);
} while (1);
finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(q, &reset);
return timeout;
__set_current_state(TASK_RUNNING);
i915_reset(request->i915);
return true;
}
/**
@ -1068,7 +1045,9 @@ long i915_wait_request(struct drm_i915_gem_request *req,
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
DEFINE_WAIT_FUNC(reset, default_wake_function);
DEFINE_WAIT_FUNC(exec, default_wake_function);
struct intel_wait wait;
might_sleep();
@ -1085,27 +1064,45 @@ long i915_wait_request(struct drm_i915_gem_request *req,
if (!timeout)
return -ETIME;
trace_i915_gem_request_wait_begin(req);
trace_i915_gem_request_wait_begin(req, flags);
if (!i915_sw_fence_done(&req->execute)) {
timeout = __i915_request_wait_for_execute(req, flags, timeout);
if (timeout < 0)
add_wait_queue(&req->execute, &exec);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
intel_wait_init(&wait, req);
restart:
do {
set_current_state(state);
if (intel_wait_update_request(&wait, req))
break;
if (flags & I915_WAIT_LOCKED &&
__i915_wait_request_check_and_reset(req))
continue;
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
goto complete;
}
GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
}
GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
GEM_BUG_ON(!req->global_seqno);
if (!timeout) {
timeout = -ETIME;
goto complete;
}
timeout = io_schedule_timeout(timeout);
} while (1);
GEM_BUG_ON(!intel_wait_has_seqno(&wait));
GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
goto complete;
set_current_state(state);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@ -1113,6 +1110,9 @@ long i915_wait_request(struct drm_i915_gem_request *req,
*/
goto wakeup;
if (flags & I915_WAIT_LOCKED)
__i915_wait_request_check_and_reset(req);
for (;;) {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@ -1126,7 +1126,8 @@ long i915_wait_request(struct drm_i915_gem_request *req,
timeout = io_schedule_timeout(timeout);
if (intel_wait_complete(&wait))
if (intel_wait_complete(&wait) &&
intel_wait_check_request(&wait, req))
break;
set_current_state(state);
@ -1151,25 +1152,25 @@ wakeup:
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
i915_reset_in_progress(&req->i915->gpu_error)) {
__set_current_state(TASK_RUNNING);
i915_reset(req->i915);
reset_wait_queue(&req->i915->gpu_error.wait_queue,
&reset);
__i915_wait_request_check_and_reset(req))
continue;
}
/* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2))
break;
if (!intel_wait_check_request(&wait, req)) {
intel_engine_remove_wait(req->engine, &wait);
goto restart;
}
}
intel_engine_remove_wait(req->engine, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
__set_current_state(TASK_RUNNING);
complete:
__set_current_state(TASK_RUNNING);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(errq, &reset);
remove_wait_queue(&req->execute, &exec);
trace_i915_gem_request_wait_end(req);
return timeout;
@ -1178,14 +1179,21 @@ complete:
static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
u32 seqno = intel_engine_get_seqno(engine);
LIST_HEAD(retire);
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next,
&engine->timeline->requests, link) {
if (!__i915_gem_request_completed(request))
return;
if (!i915_seqno_passed(seqno, request->global_seqno))
break;
i915_gem_request_retire(request);
list_move_tail(&request->link, &retire);
}
spin_unlock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next, &retire, link)
i915_gem_request_retire(request);
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
@ -1201,3 +1209,8 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
engine_retire_requests(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_gem_request.c"
#endif


@ -32,10 +32,12 @@
struct drm_file;
struct drm_i915_gem_object;
struct drm_i915_gem_request;
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
struct drm_i915_gem_request *request;
u32 seqno;
};
@ -119,18 +121,10 @@ struct drm_i915_gem_request {
* The submit fence is used to await upon all of the request's
* dependencies. When it is signaled, the request is ready to run.
* It is used by the driver to then queue the request for execution.
*
* The execute fence is used to signal when the request has been
* sent to hardware.
*
* It is illegal for the submit fence of one request to wait upon the
* execute fence of an earlier request. It should be sufficient to
* wait upon the submit fence of the earlier request.
*/
struct i915_sw_fence submit;
struct i915_sw_fence execute;
wait_queue_t submitq;
wait_queue_t execq;
wait_queue_head_t execute;
/* A list of everyone we wait upon, and everyone who waits upon us.
* Even though we will not be submitted to the hardware before the
@ -143,13 +137,12 @@ struct drm_i915_gem_request {
struct i915_priotree priotree;
struct i915_dependency dep;
u32 global_seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
/** GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
*/
u32 previous_seqno;
u32 global_seqno;
/** Position in the ring of the start of the request */
u32 head;
@ -187,7 +180,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
struct list_head client_link;
};
extern const struct dma_fence_ops i915_fence_ops;
@ -200,8 +193,6 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
static inline struct drm_i915_gem_request *
@ -243,6 +234,30 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
*pdst = src;
}
/**
* i915_gem_request_global_seqno - report the current global seqno
* @request: the request
*
* A request is assigned a global seqno only when it is on the hardware
* execution queue. The global seqno can be used to maintain a list of
* requests on the same engine in retirement order, for example for
* constructing a priority queue for waiting. Prior to its execution, or
* if it is subsequently removed in the event of preemption, its global
* seqno is zero. As both insertion and removal from the execution queue
* may operate in IRQ context, it is not guarded by the usual struct_mutex
* BKL. Instead those relying on the global seqno must be prepared for its
* value to change between reads. Only when the request is complete can
* the global seqno be stable (due to the memory barriers on submitting
* the commands to the hardware to write the breadcrumb: if the HWS shows
* that it has passed the global seqno and the global seqno is unchanged
* after the read, it is indeed complete).
*/
static u32
i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
{
return READ_ONCE(request->global_seqno);
}
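
A reduced model of the protocol the comment above describes (a sketch, not the driver's code; the real helpers follow below): sample the global seqno once, compare it against the engine breadcrumb, then confirm the seqno did not change under us.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct request { _Atomic uint32_t global_seqno; };

/* Wraparound-safe comparison, assuming the usual signed-difference form
 * of i915_seqno_passed(). */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Model of __i915_gem_request_completed(): complete only if the breadcrumb
 * passed the sampled seqno *and* the seqno stayed stable across the check
 * (it may be zeroed by unsubmission in IRQ context at any time). */
static bool request_completed(struct request *rq, uint32_t hws_seqno)
{
	uint32_t seqno = atomic_load(&rq->global_seqno);	/* READ_ONCE() */

	if (!seqno)				/* not on the HW queue */
		return false;

	return seqno_passed(hws_seqno, seqno) &&
	       atomic_load(&rq->global_seqno) == seqno;
}
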
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
@ -259,6 +274,9 @@ void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);
void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
@ -283,46 +301,55 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
__i915_gem_request_started(const struct drm_i915_gem_request *req)
__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
{
GEM_BUG_ON(!req->global_seqno);
GEM_BUG_ON(!seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
seqno - 1);
}
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
if (!req->global_seqno)
u32 seqno;
seqno = i915_gem_request_global_seqno(req);
if (!seqno)
return false;
return __i915_gem_request_started(req);
return __i915_gem_request_started(req, seqno);
}
static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
GEM_BUG_ON(!req->global_seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->global_seqno);
GEM_BUG_ON(!seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
seqno == i915_gem_request_global_seqno(req);
}
static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
if (!req->global_seqno)
u32 seqno;
seqno = i915_gem_request_global_seqno(req);
if (!seqno)
return false;
return __i915_gem_request_completed(req);
return __i915_gem_request_completed(req, seqno);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us);
u32 seqno, int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
return (__i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
u32 seqno;
seqno = i915_gem_request_global_seqno(request);
return (__i915_gem_request_started(request, seqno) &&
__i915_spin_request(request, seqno, state, timeout_us));
}
/* We treat requests as fences. This is not to be confused with our


@ -207,7 +207,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!(flags & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
obj->framebuffer_references))
i915_gem_object_is_framebuffer(obj)))
continue;
if (!can_release_pages(obj))
@ -259,10 +259,13 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
unsigned long freed;
intel_runtime_pm_get(dev_priv);
freed = i915_gem_shrink(dev_priv, -1UL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv);
rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
return freed;
@ -380,9 +383,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not


@ -79,12 +79,12 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
u32 base;
dma_addr_t base;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at register BSM (0x5c) in the igfx configuration space. On a few
@ -189,14 +189,14 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
base = tom - tseg_size - ggtt->stolen_size;
}
if (base == 0)
if (base == 0 || add_overflows(base, ggtt->stolen_size))
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_GEN(dev_priv) <= 4 &&
!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
struct {
u32 start, end;
dma_addr_t start, end;
} stolen[2] = {
{ .start = base, .end = base + ggtt->stolen_size, },
{ .start = base, .end = base + ggtt->stolen_size, },
@ -228,11 +228,13 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
dma_addr_t end = base + ggtt->stolen_size - 1;
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
(unsigned long long)ggtt_start,
(unsigned long long)ggtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
base, base + (u32)ggtt->stolen_size - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
&base, &end);
}
}
@ -261,8 +263,10 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)ggtt->stolen_size);
dma_addr_t end = base + ggtt->stolen_size;
DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
&base, &end);
base = 0;
}
}
@ -281,13 +285,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
phys_addr_t *base, u32 *size)
dma_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@ -304,7 +308,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
phys_addr_t *base, u32 *size)
dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@ -330,7 +334,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
phys_addr_t *base, u32 *size)
dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@ -350,7 +354,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
phys_addr_t *base, u32 *size)
dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@ -376,11 +380,11 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
phys_addr_t *base, u32 *size)
dma_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
phys_addr_t stolen_top;
dma_addr_t stolen_top;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
@ -399,7 +403,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
phys_addr_t reserved_base, stolen_top;
dma_addr_t reserved_base, stolen_top;
u32 reserved_total, reserved_size;
u32 stolen_usable_start;
@ -420,7 +424,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (ggtt->stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@ -469,8 +473,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
phys_addr_t reserved_top = reserved_base + reserved_size;
DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n",
dma_addr_t reserved_top = reserved_base + reserved_size;
DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
&reserved_base, &reserved_top,
&dev_priv->mm.stolen_base, &stolen_top);
return 0;


@ -158,13 +158,8 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
if (stride > 8192)
return false;
if (IS_GEN3(i915)) {
if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
if (!is_power_of_2(stride))
return false;
}
if (IS_GEN2(i915) ||
@ -176,12 +171,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
if (!stride || !IS_ALIGNED(stride, tile_width))
return false;
/* 965+ just needs multiples of tile width */
if (INTEL_GEN(i915) >= 4)
return true;
/* Pre-965 needs power of two tile widths */
return is_power_of_2(stride);
return true;
}
static bool i915_vma_fence_prepare(struct i915_vma *vma,
@ -248,7 +238,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
if (obj->framebuffer_references)
if (i915_gem_object_is_framebuffer(obj))
return -EBUSY;
/* We need to rebind the object if its current allocation
@ -268,6 +258,12 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if (err)
return err;
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@ -304,6 +300,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
obj->tiling_and_stride = tiling | stride;
i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);


@ -33,7 +33,13 @@ struct i915_gem_timeline;
struct intel_timeline {
u64 fence_context;
u32 last_submitted_seqno;
u32 seqno;
/**
* Count of outstanding requests, from the time they are constructed
* to the moment they are retired. Loosely coupled to hardware.
*/
u32 inflight_seqnos;
spinlock_t lock;
@ -56,7 +62,6 @@ struct intel_timeline {
struct i915_gem_timeline {
struct list_head link;
atomic_t seqno;
struct drm_i915_private *i915;
const char *name;


@ -342,7 +342,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
static void error_print_instdone(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
const struct drm_i915_error_engine *ee)
{
int slice;
int subslice;
@ -372,7 +372,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
struct drm_i915_error_request *erq)
const struct drm_i915_error_request *erq)
{
if (!erq->seqno)
return;
@ -384,8 +384,17 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
ctx->ban_score, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
const struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
@ -457,6 +466,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
error_print_context(m, " Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@ -536,21 +546,57 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
#undef PRINT_FLAG
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
const char *name,
const char *type,
const void *x)
{
struct drm_i915_private *dev_priv = error_priv->i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
if (!__builtin_strcmp(type, "bool"))
err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
else if (!__builtin_strcmp(type, "int"))
err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
else if (!__builtin_strcmp(type, "char *"))
err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
else
BUILD_BUG();
}
static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *p)
{
#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
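
The PRINT/DUP/FREE trio here all rely on the same trick: I915_PARAMS_FOR_EACH() is an X-macro that expands its argument once per (type, name) module parameter, and __builtin_strcmp() against a string literal (a gcc/clang builtin) folds to a constant at compile time, so only the matching branch survives in each expansion. A reduced standalone sketch; the parameter list below is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical parameter table: MACRO(type, name) per entry. */
#define PARAMS_FOR_EACH(func) \
	func(int, enable_guc) \
	func(bool, enable_hangcheck)

struct params {
	int enable_guc;
	bool enable_hangcheck;
};

/* Dispatch on the stringized type; each call site keeps one branch. */
static void print_param(const char *name, const char *type, const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
	else if (!__builtin_strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
}

int main(void)
{
	struct params p = { .enable_guc = 1, .enable_hangcheck = true };

#define PRINT(T, x) print_param(#x, #T, &p.x);
	PARAMS_FOR_EACH(PRINT);
#undef PRINT
	return 0;
}
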
static void err_print_pciid(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
struct pci_dev *pdev = i915->drm.pdev;
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
pdev->subsystem_vendor,
pdev->subsystem_device);
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error)
{
struct drm_i915_private *dev_priv = m->i915;
struct drm_i915_error_object *obj;
int i, j;
if (!error) {
err_printf(m, "no error state collected\n");
goto out;
err_printf(m, "No error state collected\n");
return 0;
}
err_printf(m, "%s\n", error->error_msg);
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "Time: %ld s %ld us\n",
error->time.tv_sec, error->time.tv_usec);
@ -558,26 +604,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->boottime.tv_sec, error->boottime.tv_usec);
err_printf(m, "Uptime: %ld s %ld us\n",
error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
if (error->engine[i].hangcheck_stalled &&
error->engine[i].pid != -1) {
err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n",
error->engine[i].context.pid) {
err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
engine_str(i),
error->engine[i].comm,
error->engine[i].pid,
error->engine[i].context_bans);
error->engine[i].context.comm,
error->engine[i].context.pid,
error->engine[i].context.ban_score);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
pdev->subsystem_vendor,
pdev->subsystem_device);
err_print_pciid(m, error->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev_priv)) {
@ -590,21 +632,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
CSR_VERSION_MINOR(csr->version));
}
err_printf(m, "GT awake: %s\n", yesno(error->awake));
err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_GEN(dev_priv) >= 8) {
for (i = 0; i < 4; i++)
err_printf(m, "GTIER gt %d: 0x%08x\n", i,
error->gtier[i]);
} else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
for (i = 0; i < error->ngtier; i++)
err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
for (i = 0; i < dev_priv->num_fence_regs; i++)
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
if (INTEL_GEN(dev_priv) >= 6) {
@ -653,16 +694,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
struct drm_i915_error_engine *ee = &error->engine[i];
const struct drm_i915_error_engine *ee = &error->engine[i];
obj = ee->batchbuffer;
if (obj) {
err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d], bans %d)",
ee->comm,
ee->pid,
ee->context_bans);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
ee->context.comm,
ee->context.pid,
ee->context.handle,
ee->context.hw_id,
ee->context.ban_score);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@ -716,9 +759,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
intel_display_print_error_state(m, dev_priv, error->display);
intel_display_print_error_state(m, error->display);
err_print_capabilities(m, &error->device_info);
err_print_params(m, &error->params);
out:
if (m->bytes == 0 && m->err)
return m->err;
@ -770,10 +815,16 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
static __always_inline void free_param(const char *type, void *x)
{
struct drm_i915_error_state *error = container_of(error_ref,
typeof(*error), ref);
if (!__builtin_strcmp(type, "char *"))
kfree(*(void **)x);
}
void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
container_of(error_ref, typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@ -800,6 +851,11 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
#define FREE(T, x) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
kfree(error);
}
@ -938,7 +994,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_gpu_state *error,
int *engine_id)
{
uint32_t error_code = 0;
@ -963,20 +1019,21 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
}
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
int i;
if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else if (INTEL_GEN(dev_priv) >= 6) {
if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
} else if (INTEL_GEN(dev_priv) >= 4) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
}
error->nfence = i;
}
static inline u32
@ -1000,7 +1057,7 @@ gen8_engine_sync_index(struct intel_engine_cs *engine,
return idx;
}
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
static void gen8_record_semaphore_state(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@ -1054,7 +1111,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
if (!spin_trylock_irq(&b->lock)) {
if (!spin_trylock_irq(&b->rb_lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@ -1062,7 +1119,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
waiter = NULL;
if (count)
@ -1072,7 +1129,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
if (!spin_trylock_irq(&b->lock)) {
if (!spin_trylock_irq(&b->rb_lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@ -1080,7 +1137,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
ee->waiters = waiter;
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
strcpy(waiter->comm, w->tsk->comm);
waiter->pid = w->tsk->pid;
@ -1090,10 +1147,10 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@ -1267,8 +1324,30 @@ static void error_record_engine_execlists(struct intel_engine_cs *engine,
&ee->execlist[n]);
}
static void record_context(struct drm_i915_error_context *e,
struct i915_gem_context *ctx)
{
if (ctx->pid) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(ctx->pid, PIDTYPE_PID);
if (task) {
strcpy(e->comm, task->comm);
e->pid = task->pid;
}
rcu_read_unlock();
}
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->ban_score = ctx->ban_score;
e->guilty = ctx->guilty_count;
e->active = ctx->active_count;
}
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int i;
@ -1281,7 +1360,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
ee->pid = -1;
ee->engine_id = -1;
if (!engine)
@ -1296,11 +1374,12 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
request = i915_gem_find_active_request(engine);
if (request) {
struct intel_ring *ring;
struct pid *pid;
ee->vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &ggtt->base;
record_context(&ee->context, request->ctx);
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
@ -1318,19 +1397,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv,
request->ctx->engine[i].state);
pid = request->ctx->pid;
if (pid) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(pid, PIDTYPE_PID);
if (task) {
strcpy(ee->comm, task->comm);
ee->pid = task->pid;
}
rcu_read_unlock();
}
error->simulated |=
i915_gem_context_no_error_capture(request->ctx);
@ -1357,7 +1423,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
}
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_gpu_state *error,
struct i915_address_space *vm,
int idx)
{
@ -1383,7 +1449,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
}
static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
int cnt = 0, i, j;
@ -1408,7 +1474,7 @@ static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
}
static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_i915_error_buffer *bo;
@ -1439,7 +1505,7 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
}
static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
@ -1451,7 +1517,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
int i;
@ -1508,9 +1574,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
error->ngtier = 4;
} else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
@@ -1521,7 +1589,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_gpu_state *error,
u32 engine_mask,
const char *error_msg)
{
@@ -1534,12 +1602,12 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
"GPU HANG: ecode %d:%d:0x%08x",
INTEL_GEN(dev_priv), engine_id, ecode);
if (engine_id != -1 && error->engine[engine_id].pid != -1)
if (engine_id != -1 && error->engine[engine_id].context.pid)
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
error->engine[engine_id].comm,
error->engine[engine_id].pid);
error->engine[engine_id].context.comm,
error->engine[engine_id].context.pid);
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
@@ -1548,8 +1616,12 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct i915_gpu_state *error)
{
error->awake = dev_priv->gt.awake;
error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
error->suspended = dev_priv->pm.suspended;
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
error->iommu = intel_iommu_gfx_mapped;
@@ -1562,9 +1634,26 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
static __always_inline void dup_param(const char *type, void *x)
{
if (!__builtin_strcmp(type, "char *"))
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
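The dup_param() helper above leans on a compile-time trick: with string-literal arguments, __builtin_strcmp() is constant-folded, so the kstrdup() branch survives only for "char *" members and the call compiles to nothing for every other parameter type. A minimal userspace sketch of the same X-macro pattern (the parameter names here are made up, not the driver's):

#include <stdio.h>
#include <string.h>

/* One list describes the parameters; each use re-expands it. */
#define PARAMS_FOR_EACH(func) \
	func(int, modeset) \
	func(char *, fw_path)

#define MEMBER(T, member) T member;
struct params {
	PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

static void dup_param(const char *type, void *x)
{
	/* folded away at compile time unless the member is a string */
	if (!strcmp(type, "char *"))
		*(char **)x = strdup(*(char **)x);
}

int main(void)
{
	struct params p = { .modeset = 1, .fw_path = "guc.bin" };

#define DUP(T, x) dup_param(#T, &p.x);
	PARAMS_FOR_EACH(DUP)
#undef DUP

	printf("%d %s\n", p.modeset, p.fw_path); /* fw_path is now a private copy */
	return 0;
}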
static int capture(void *data)
{
struct drm_i915_error_state *error = data;
struct i915_gpu_state *error = data;
do_gettimeofday(&error->time);
error->boottime = ktime_to_timeval(ktime_get_boottime());
error->uptime =
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
error->params = i915;
#define DUP(T, x) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
i915_capture_gen_state(error->i915, error);
i915_capture_reg_state(error->i915, error);
@@ -1574,12 +1663,6 @@ static int capture(void *data)
i915_capture_pinned_buffers(error->i915, error);
i915_gem_capture_guc_log_buffer(error->i915, error);
do_gettimeofday(&error->time);
error->boottime = ktime_to_timeval(ktime_get_boottime());
error->uptime =
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
@@ -1588,6 +1671,23 @@ static int capture(void *data)
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error)
return NULL;
kref_init(&error->ref);
error->i915 = i915;
stop_machine(capture, error, NULL);
return error;
}
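i915_capture_gpu_state() runs the capture callback under stop_machine(), so every other CPU is parked while the snapshot is taken and the recorded state cannot change mid-capture. A sketch of that pattern (struct snapshot and its field are invented for illustration):

#include <linux/slab.h>
#include <linux/stop_machine.h>

struct snapshot {
	bool taken;		/* stand-in for the real captured state */
};

/* Runs with all other CPUs spinning in stop_machine(): no concurrent
 * writers, so device state can be read without further locking. */
static int take_snapshot(void *data)
{
	struct snapshot *s = data;

	s->taken = true;
	return 0;
}

static struct snapshot *grab_snapshot(void)
{
	struct snapshot *s = kzalloc(sizeof(*s), GFP_ATOMIC);

	if (s)
		stop_machine(take_snapshot, s, NULL);
	return s;
}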
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -1602,7 +1702,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
const char *error_msg)
{
static bool warned;
struct drm_i915_error_state *error;
struct i915_gpu_state *error;
unsigned long flags;
if (!i915.error_capture)
@@ -1611,18 +1711,12 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
error = i915_capture_gpu_state(dev_priv);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
kref_init(&error->ref);
error->i915 = dev_priv;
stop_machine(capture, error, NULL);
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1636,7 +1730,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
if (error) {
i915_error_state_free(&error->ref);
__i915_gpu_state_free(&error->ref);
return;
}
@@ -1652,33 +1746,28 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
}
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
struct drm_i915_private *dev_priv = to_i915(dev);
spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
if (error_priv->error)
kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_i915_private *dev_priv)
{
struct drm_i915_error_state *error;
spin_lock_irq(&dev_priv->gpu_error.lock);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
spin_unlock_irq(&dev_priv->gpu_error.lock);
struct i915_gpu_state *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (error)
kref_put(&error->ref, i915_error_state_free);
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
return error;
}
void i915_reset_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
i915_gpu_state_put(error);
}


@@ -348,7 +348,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
u32 freespace;
int ret;
spin_lock(&client->wq_lock);
spin_lock_irq(&client->wq_lock);
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
freespace -= client->wq_rsvd;
if (likely(freespace >= wqi_size)) {
@@ -358,21 +358,27 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
client->no_wq_space++;
ret = -EAGAIN;
}
spin_unlock(&client->wq_lock);
spin_unlock_irq(&client->wq_lock);
return ret;
}
static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
{
unsigned long flags;
spin_lock_irqsave(&client->wq_lock, flags);
client->wq_rsvd += size;
spin_unlock_irqrestore(&client->wq_lock, flags);
}
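The conversion in this file from plain spin_lock() to the _irq/_irqsave variants follows the usual rule: take the _irq form when the caller is known to run with interrupts enabled, and the _irqsave form when the lock may be taken from contexts whose interrupt state is unknown. A generic sketch of the latter (not driver code):

#include <linux/spinlock.h>

/* Safe from any context: the prior interrupt state is saved in
 * 'flags' and restored on unlock, rather than being unconditionally
 * re-enabled. */
static void adjust_counter(spinlock_t *lock, int *counter, int delta)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	*counter += delta;
	spin_unlock_irqrestore(lock, flags);
}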
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
const int wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
spin_lock(&client->wq_lock);
client->wq_rsvd -= wqi_size;
spin_unlock(&client->wq_lock);
guc_client_update_wq_rsvd(client, -wqi_size);
}
/* Construct a Work Item and append it to the GuC's Work Queue */
@@ -509,15 +515,18 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
unsigned long flags;
int b_ret;
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
/* WA to flush out the pending GMADR writes to ring buffer. */
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
trace_i915_gem_request_in(rq, 0);
spin_lock_irqsave(&client->wq_lock, flags);
guc_wq_item_append(client, rq);
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -527,7 +536,8 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock(&client->wq_lock);
spin_unlock_irqrestore(&client->wq_lock, flags);
}
static void i915_guc_submit(struct drm_i915_gem_request *rq)
@@ -943,16 +953,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv, id) {
const int wqi_size = sizeof(struct guc_wq_item);
struct drm_i915_gem_request *rq;
engine->submit_request = i915_guc_submit;
engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
guc_client_update_wq_rsvd(client, wqi_size);
__i915_guc_submit(rq);
}
spin_unlock_irq(&engine->timeline->lock);
}
return 0;


@@ -180,7 +180,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
{
uint32_t val;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
@@ -222,7 +222,7 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -250,7 +250,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -302,7 +302,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
@@ -340,7 +340,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
I915_WRITE(reg, reset_mask);
I915_WRITE(reg, reset_mask);
@@ -349,7 +349,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
@@ -359,7 +359,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
@@ -463,7 +463,7 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
uint32_t new_val;
uint32_t old_val;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -496,7 +496,7 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -530,7 +530,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -546,7 +546,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -573,7 +573,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -783,6 +783,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
int position, vtotal;
if (!crtc->active)
return -1;
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
@@ -1033,9 +1036,42 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
smp_store_mb(engine->breadcrumbs.irq_posted, true);
if (intel_engine_wakeup(engine))
trace_i915_gem_request_notify(engine);
struct drm_i915_gem_request *rq = NULL;
struct intel_wait *wait;
atomic_inc(&engine->irq_count);
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
/* We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
* hardware, signal the fence now rather than wait for
* the signaler to be woken up. We still wake up the
* waiter in order to handle the irq-seqno coherency
* issues (we may receive the interrupt before the
* seqno is written, see __i915_request_irq_complete())
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
wait->seqno))
rq = i915_gem_request_get(wait->request);
wake_up_process(wait->tsk);
} else {
__intel_engine_disarm_breadcrumbs(engine);
}
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
dma_fence_signal(&rq->fence);
i915_gem_request_put(rq);
}
trace_intel_engine_notify(engine, wait);
}
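The early dma_fence_signal() above depends on i915_seqno_passed() to compare hardware seqnos, and that comparison must stay correct when the 32-bit seqno wraps. A sketch, assuming the conventional signed-difference idiom:

#include <linux/types.h>

/* "Has seq1 passed seq2?" -- the signed subtraction keeps the answer
 * correct across u32 wraparound for any two seqnos less than 2^31
 * apart. */
static inline bool seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}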
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1173,20 +1209,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.efficient_freq - adj) {
new_delay = dev_priv->rps.efficient_freq;
adj = 0;
}
} else if (client_boost || any_waiters(dev_priv)) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
else
else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
@@ -1209,7 +1237,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
intel_set_rps(dev_priv, new_delay);
if (intel_set_rps(dev_priv, new_delay)) {
DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -1349,8 +1380,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
tasklet_schedule(&engine->irq_tasklet);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
tasklet_hi_schedule(&engine->irq_tasklet);
}
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -3106,9 +3140,34 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
return enabled_irqs;
}
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTB_PULSE_DURATION_MASK |
PORTC_PULSE_DURATION_MASK |
PORTD_PULSE_DURATION_MASK);
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
/*
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
if (HAS_PCH_LPT_LP(dev_priv))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, hotplug, enabled_irqs;
u32 hotplug_irqs, enabled_irqs;
if (HAS_PCH_IBX(dev_priv)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
@@ -3120,23 +3179,7 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
/*
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
if (HAS_PCH_LPT_LP(dev_priv))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
ibx_hpd_detection_setup(dev_priv);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3168,9 +3211,25 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, hotplug, enabled_irqs;
u32 hotplug_irqs, enabled_irqs;
if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
@@ -3189,15 +3248,7 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
ilk_hpd_detection_setup(dev_priv);
ibx_hpd_irq_setup(dev_priv);
}
@@ -3268,7 +3319,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
HAS_PCH_LPT(dev_priv))
; /* TODO: Enable HPD detection on older PCH platforms too */
ibx_hpd_detection_setup(dev_priv);
else
spt_hpd_detection_setup(dev_priv);
}
@@ -3345,6 +3396,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
gen5_gt_irq_postinstall(dev);
ilk_hpd_detection_setup(dev_priv);
ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev_priv)) {
@@ -3363,7 +3416,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
return;
@@ -3378,7 +3431,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if (!dev_priv->display_irqs_enabled)
return;
@@ -3485,6 +3538,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_GEN9_LP(dev_priv))
bxt_hpd_detection_setup(dev_priv);
else if (IS_BROADWELL(dev_priv))
ilk_hpd_detection_setup(dev_priv);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4052,7 +4107,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_en;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
@@ -4265,6 +4320,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
* special care to avoid writing any of the display block registers
* outside of the power domain. We defer setting up the display irqs
* in this case to the runtime pm.
*/
dev_priv->display_irqs_enabled = true;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display_irqs_enabled = false;
dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;


@@ -145,7 +145,7 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400);
MODULE_PARM_DESC(alpha_support,
"Enable alpha quality driver support for latest hardware. "
"See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
@@ -205,9 +205,9 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");
"Force enable atomic functionality on platforms that don't have full support yet.");
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);


@@ -27,46 +27,51 @@
#include <linux/cache.h> /* for __read_mostly */
#define I915_PARAMS_FOR_EACH(func) \
func(int, modeset); \
func(int, panel_ignore_lid); \
func(int, semaphores); \
func(int, lvds_channel_mode); \
func(int, panel_use_ssc); \
func(int, vbt_sdvo_panel_type); \
func(int, enable_rc6); \
func(int, enable_dc); \
func(int, enable_fbc); \
func(int, enable_ppgtt); \
func(int, enable_execlists); \
func(int, enable_psr); \
func(int, disable_power_well); \
func(int, enable_ips); \
func(int, invert_brightness); \
func(int, enable_guc_loading); \
func(int, enable_guc_submission); \
func(int, guc_log_level); \
func(int, use_mmio_flip); \
func(int, mmio_debug); \
func(int, edp_vswing); \
func(unsigned int, inject_load_failure); \
/* leave bools at the end to not create holes */ \
func(bool, alpha_support); \
func(bool, enable_cmd_parser); \
func(bool, enable_hangcheck); \
func(bool, fastboot); \
func(bool, prefault_disable); \
func(bool, load_detect_test); \
func(bool, force_reset_modeset_test); \
func(bool, reset); \
func(bool, error_capture); \
func(bool, disable_display); \
func(bool, verbose_state_checks); \
func(bool, nuclear_pageflip); \
func(bool, enable_dp_mst); \
func(bool, enable_dpcd_backlight); \
func(bool, enable_gvt)
#define MEMBER(T, member) T member
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int alpha_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_guc_loading;
int enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
bool enable_cmd_parser;
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool force_reset_modeset_test;
bool reset;
bool error_capture;
bool disable_display;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
bool enable_dpcd_backlight;
bool enable_gvt;
I915_PARAMS_FOR_EACH(MEMBER);
};
#undef MEMBER
extern struct i915_params i915 __read_mostly;


@@ -27,6 +27,7 @@
#include <linux/vga_switcheroo.h>
#include "i915_drv.h"
#include "i915_selftest.h"
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
@@ -403,6 +404,7 @@ static const struct intel_device_info intel_geminilake_info = {
.platform = INTEL_GEMINILAKE,
.is_alpha_support = 1,
.ddb_size = 1024,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
};
static const struct intel_device_info intel_kabylake_info = {
@@ -472,10 +474,19 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
i915_driver_unload(dev);
drm_dev_unref(dev);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
int err;
if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
@@ -499,15 +510,17 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
return i915_driver_load(pdev, ent);
}
err = i915_driver_load(pdev, ent);
if (err)
return err;
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
err = i915_live_selftests(pdev);
if (err) {
i915_pci_remove(pdev);
return err > 0 ? -ENOTTY : err;
}
i915_driver_unload(dev);
drm_dev_unref(dev);
return 0;
}
static struct pci_driver i915_pci_driver = {
@@ -521,6 +534,11 @@ static struct pci_driver i915_pci_driver = {
static int __init i915_init(void)
{
bool use_kms = true;
int err;
err = i915_mock_selftests();
if (err)
return err > 0 ? 0 : err;
/*
* Enable KMS by default, unless explicitly overridden by


@@ -1008,7 +1008,7 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->perf.hook_lock);
lockdep_assert_held(&dev_priv->perf.hook_lock);
if (dev_priv->perf.oa.exclusive_stream->enabled) {
struct i915_gem_context *ctx =


@@ -48,6 +48,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)
@@ -56,14 +58,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
#define _PIPE3(pipe, ...) _PICK(pipe, __VA_ARGS__)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
#define _PORT3(port, ...) _PICK(port, __VA_ARGS__)
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
(phy) == DPIO_PHY1 ? (b) : (c))
#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
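The new _PICK() macro replaces the old chained ternaries: it builds an anonymous const u32 array as a compound literal and indexes it, so _PIPE3()/_PORT3()/_PHY3() now extend to any number of instances. A standalone sketch of the same construct (the offsets below are hypothetical):

#include <assert.h>
#include <stdint.h>

#define PICK(idx, ...) (((const uint32_t []){ __VA_ARGS__ })[idx])

int main(void)
{
	/* hypothetical per-pipe register offsets */
	assert(PICK(0, 0x70180, 0x71180, 0x72180) == 0x70180);
	assert(PICK(2, 0x70180, 0x71180, 0x72180) == 0x72180);
	return 0;
}

Unlike the ternary form, an out-of-range index is not caught at compile time, so callers must pass a valid pipe/port/phy value.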
#define _MASKED_FIELD(mask, value) ({ \
@@ -78,7 +77,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
/* Engine ID */
#define RCS_HW 0
#define VCS_HW 1
#define BCS_HW 2
#define VECS_HW 3
#define VCS2_HW 4
/* PCI config space */
@@ -120,7 +125,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4)
#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
@@ -1553,6 +1558,7 @@ enum skl_disp_power_wells {
_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define MIPIO_RST_CTRL (1 << 2)
#define _BXT_PHY_CTL_DDI_A 0x64C00
#define _BXT_PHY_CTL_DDI_B 0x64C10
@@ -3376,10 +3382,22 @@ enum {
INTEL_LEGACY_64B_CONTEXT
};
enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
FAULT_AND_STREAM,
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
INTEL_LEGACY_64B_CONTEXT : \
INTEL_LEGACY_32B_CONTEXT)
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -5887,11 +5905,18 @@ enum {
#define _PLANE_KEYMSK_2_A 0x70298
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13)
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
#define _PLANE_CTL_3_B 0x71380
@@ -5986,7 +6011,17 @@ enum {
#define PLANE_NV12_BUF_CFG(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
/* SKL new cursor registers */
#define _PLANE_COLOR_CTL_1_B 0x711CC
#define _PLANE_COLOR_CTL_2_B 0x712CC
#define _PLANE_COLOR_CTL_3_B 0x713CC
#define _PLANE_COLOR_CTL_1(pipe) \
_PIPE(pipe, _PLANE_COLOR_CTL_1_A, _PLANE_COLOR_CTL_1_B)
#define _PLANE_COLOR_CTL_2(pipe) \
_PIPE(pipe, _PLANE_COLOR_CTL_2_A, _PLANE_COLOR_CTL_2_B)
#define PLANE_COLOR_CTL(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_COLOR_CTL_1(pipe), _PLANE_COLOR_CTL_2(pipe))
/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
@@ -6466,6 +6501,11 @@ enum {
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
#define CHICKEN_MISC_2 _MMIO(0x42084)
#define GLK_CL0_PWR_DOWN (1 << 10)
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL2_PWR_DOWN (1 << 12)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -8167,6 +8207,7 @@ enum {
#define PAL_PREC_10_12_BIT (0 << 31)
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -8176,12 +8217,26 @@ enum {
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
#define _PAL_PREC_EXT2_GC_MAX_A 0x4A430
#define _PAL_PREC_EXT2_GC_MAX_B 0x4AC30
#define _PAL_PREC_EXT2_GC_MAX_C 0x4B430
#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
#define _PRE_CSC_GAMC_INDEX_C 0x4B484
#define PRE_CSC_GAMC_AUTO_INCREMENT (1 << 10)
#define _PRE_CSC_GAMC_DATA_A 0x4A488
#define _PRE_CSC_GAMC_DATA_B 0x4AC88
#define _PRE_CSC_GAMC_DATA_C 0x4B488
#define PRE_CSC_GAMC_INDEX(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_INDEX_A, _PRE_CSC_GAMC_INDEX_B)
#define PRE_CSC_GAMC_DATA(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_DATA_A, _PRE_CSC_GAMC_DATA_B)
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
@@ -8215,9 +8270,14 @@ enum {
/* MIPI DSI registers */
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
@@ -8304,10 +8364,12 @@ enum {
#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSIC_16X_BY1 (0 << 10)
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
#define BXT_DSIC_16X_MASK (3 << 10)
#define BXT_DSIA_16X_BY1 (0 << 8)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
@@ -8317,6 +8379,8 @@ enum {
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
#define GLK_DSI_PLL_RATIO_MAX 0x6F
#define GLK_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
#define BXT_REF_CLOCK_KHZ 19200
@@ -8333,6 +8397,12 @@ enum {
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
#define HS_IO_CTRL_SELECT (1 << 0)
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -8586,6 +8656,14 @@ enum {
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)


@@ -0,0 +1,106 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __I915_SELFTEST_H__
#define __I915_SELFTEST_H__
struct pci_dev;
struct drm_i915_private;
struct i915_selftest {
unsigned long timeout_jiffies;
unsigned int timeout_ms;
unsigned int random_seed;
int mock;
int live;
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include <linux/fault-inject.h>
extern struct i915_selftest i915_selftest;
int i915_mock_selftests(void);
int i915_live_selftests(struct pci_dev *pdev);
/* We extract the function declarations from i915_mock_selftests.h and
* i915_live_selftests.h Add your unit test declarations there!
*
* Mock unit tests are run very early upon module load, before the driver
* is probed. All hardware interactions, as well as other subsystems, must
* be "mocked".
*
* Live unit tests are run after the driver is loaded - all hardware
* interactions are real.
*/
#define selftest(name, func) int func(void);
#include "selftests/i915_mock_selftests.h"
#undef selftest
#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "selftests/i915_live_selftests.h"
#undef selftest
struct i915_subtest {
int (*func)(void *data);
const char *name;
};
int __i915_subtests(const char *caller,
const struct i915_subtest *st,
unsigned int count,
void *data);
#define i915_subtests(T, data) \
__i915_subtests(__func__, T, ARRAY_SIZE(T), data)
#define SUBTEST(x) { x, #x }
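Wiring a new test into this framework takes a subtest table plus one selftest() line in the matching header. A hypothetical example (igt_sanity and i915_example_mock_selftests are invented names, used only to show the shape):

static int igt_sanity(void *dummy)
{
	return 0; /* 0 = pass, -errno = fail */
}

int i915_example_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sanity),
	};

	return i915_subtests(tests, NULL);
}

/* and in selftests/i915_mock_selftests.h:
 *	selftest(example, i915_example_mock_selftests)
 */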
#define I915_SELFTEST_DECLARE(x) x
#define I915_SELFTEST_ONLY(x) unlikely(x)
#else /* !IS_ENABLED(CONFIG_DRM_I915_SELFTEST) */
static inline int i915_mock_selftests(void) { return 0; }
static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
#define I915_SELFTEST_DECLARE(x)
#define I915_SELFTEST_ONLY(x) 0
#endif
/* Using the i915_selftest_ prefix becomes a little unwieldy with the helpers.
* Instead we use the igt_ shorthand, in reference to the intel-gpu-tools
* suite of uabi test cases (which includes a test runner for our selftests).
*/
#define IGT_TIMEOUT(name__) \
unsigned long name__ = jiffies + i915_selftest.timeout_jiffies
__printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
__igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define igt_can_mi_store_dword_imm(D) (INTEL_GEN(D) > 2)
#endif /* !__I915_SELFTEST_H__ */
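IGT_TIMEOUT() and igt_timeout() together bound otherwise open-ended loops in a test. A sketch of the intended shape (still_busy() is a made-up predicate standing in for whatever the test polls):

static int igt_wait_example(void *arg)
{
	IGT_TIMEOUT(end_time);

	while (still_busy(arg)) {	/* hypothetical condition */
		if (igt_timeout(end_time,
				"device still busy after %ums\n",
				i915_selftest.timeout_ms))
			return -ETIMEDOUT;
		cond_resched();
	}

	return 0;
}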


@@ -395,13 +395,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
intel_set_rps(dev_priv, val);
ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return count;
return ret ?: count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -448,14 +448,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
intel_set_rps(dev_priv, val);
ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return count;
return ret ?: count;
}
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
@@ -523,33 +522,27 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct drm_device *dev = &dev_priv->drm;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
ssize_t ret_count = 0;
int ret;
struct i915_gpu_state *gpu;
ssize_t ret;
memset(&error_priv, 0, sizeof(error_priv));
ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
if (ret)
return ret;
error_priv.i915 = dev_priv;
i915_error_state_get(dev, &error_priv);
ret = i915_error_state_to_str(&error_str, &error_priv);
gpu = i915_first_error_state(dev_priv);
ret = i915_error_state_to_str(&error_str, gpu);
if (ret)
goto out;
ret_count = count < error_str.bytes ? count : error_str.bytes;
ret = count < error_str.bytes ? count : error_str.bytes;
memcpy(buf, error_str.buf, ret);
memcpy(buf, error_str.buf, ret_count);
out:
i915_error_state_put(&error_priv);
i915_gpu_state_put(gpu);
i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
return ret;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
@@ -560,7 +553,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
i915_destroy_error_state(dev_priv);
i915_reset_error_state(dev_priv);
return count;
}


@@ -14,6 +14,206 @@
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace
/* watermark/fifo updates */
TRACE_EVENT(intel_cpu_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = pipe;
__entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
__entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
),
TP_printk("pipe %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe),
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_pch_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder),
TP_ARGS(dev_priv, pch_transcoder),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
enum pipe pipe = (enum pipe)pch_transcoder;
__entry->pipe = pipe;
__entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
__entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
),
TP_printk("pch transcoder %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe),
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_memory_cxsr,
TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
TP_ARGS(dev_priv, old, new),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(bool, old)
__field(bool, new)
),
TP_fast_assign(
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
__entry->frame[pipe] =
dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
__entry->scanline[pipe] =
intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
}
__entry->old = old;
__entry->new = new;
),
TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
onoff(__entry->old), onoff(__entry->new),
__entry->frame[PIPE_A], __entry->scanline[PIPE_A],
__entry->frame[PIPE_B], __entry->scanline[PIPE_B],
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);
TRACE_EVENT(vlv_wm,
TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
TP_ARGS(crtc, wm),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, level)
__field(u32, cxsr)
__field(u32, primary)
__field(u32, sprite0)
__field(u32, sprite1)
__field(u32, cursor)
__field(u32, sr_plane)
__field(u32, sr_cursor)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->level = wm->level;
__entry->cxsr = wm->cxsr;
__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
__entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
__entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
__entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
__entry->sr_plane = wm->sr.plane;
__entry->sr_cursor = wm->sr.cursor;
),
TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->level, __entry->cxsr,
__entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
__entry->sr_plane, __entry->sr_cursor)
);
TRACE_EVENT(vlv_fifo_size,
TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, sprite0_start)
__field(u32, sprite1_start)
__field(u32, fifo_size)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->sprite0_start = sprite0_start;
__entry->sprite1_start = sprite1_start;
__entry->fifo_size = fifo_size;
),
TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->sprite0_start,
__entry->sprite1_start, __entry->fifo_size)
);
/* plane updates */
TRACE_EVENT(intel_update_plane,
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
__array(int, dst, 4)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
pipe_name(__entry->pipe), __entry->name,
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
TRACE_EVENT(intel_disable_plane,
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
pipe_name(__entry->pipe), __entry->name,
__entry->frame, __entry->scanline)
);
/* pipe updates */
TRACE_EVENT(i915_pipe_update_start,
@@ -175,134 +375,6 @@ TRACE_EVENT(i915_vma_unbind,
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_va_alloc,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u64, start)
__field(u64, end)
),
TP_fast_assign(
__entry->vm = vma->vm;
__entry->start = vma->node.start;
__entry->end = vma->node.start + vma->node.size - 1;
),
TP_printk("vm=%p (%c), 0x%llx-0x%llx",
__entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
);
DECLARE_EVENT_CLASS(i915_px_entry,
TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
TP_ARGS(vm, px, start, px_shift),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, px)
__field(u64, start)
__field(u64, end)
),
TP_fast_assign(
__entry->vm = vm;
__entry->px = px;
__entry->start = start;
__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
),
TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
TP_ARGS(vm, pde, start, pde_shift)
);
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
TP_ARGS(vm, pdpe, start, pdpe_shift),
TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
TP_ARGS(vm, pml4e, start, pml4e_shift),
TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
__entry->vm, __entry->px, __entry->start, __entry->end)
);
/* Avoid extra math because we only support two sizes. The format is defined by
* bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
((((bits) == 1024) ? 288 : 144) + 1)
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
TP_PROTO(struct i915_address_space *vm, u32 pde,
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, pde)
__field(u32, first)
__field(u32, last)
__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
),
TP_fast_assign(
__entry->vm = vm;
__entry->pde = pde;
__entry->first = first;
__entry->last = first + count - 1;
scnprintf(__get_str(cur_ptes),
TRACE_PT_SIZE(bits),
"%*pb",
bits,
pt->used_ptes);
),
TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
__entry->vm, __entry->pde, __entry->last, __entry->first,
__get_str(cur_ptes))
);
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
TP_PROTO(struct i915_address_space *vm, u32 pde,
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits)
);
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
__entry->read_domains = obj->base.read_domains | (old_read << 16);
__entry->write_domain = obj->base.write_domain | (old_write << 16);
),
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
__entry->read_domains >> 16,
__entry->read_domains & 0xffff,
__entry->write_domain >> 16,
__entry->write_domain & 0xffff)
);
TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
@@ -503,13 +575,14 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->seqno)
);
TRACE_EVENT(i915_gem_ring_dispatch,
TRACE_EVENT(i915_gem_request_queue,
TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
__field(u32, flags)
),
@@ -517,13 +590,14 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->seqno = req->global_seqno;
__entry->ctx = req->ctx->hw_id;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
dma_fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
__entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
__entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
@@ -555,18 +629,23 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ctx)
__field(u32, ring)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ctx = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->seqno = req->global_seqno;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
__entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
__entry->global)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -574,24 +653,100 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *engine),
TP_ARGS(engine),
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
DECLARE_EVENT_CLASS(i915_gem_request_hw,
TP_PROTO(struct drm_i915_gem_request *req,
unsigned int port),
TP_ARGS(req, port),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(u32, global_seqno)
__field(u32, ctx)
__field(u32, port)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->ctx = req->ctx->hw_id;
__entry->seqno = req->fence.seqno;
__entry->global_seqno = req->global_seqno;
__entry->port = port;
),
TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
__entry->dev, __entry->ring, __entry->ctx,
__entry->seqno, __entry->global_seqno,
__entry->port)
);
DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
TP_ARGS(req, port)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
{
}
static inline void
trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
{
}
static inline void
trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
{
}
static inline void
trace_i915_gem_request_out(struct drm_i915_gem_request *req)
{
}
#endif
#endif
TRACE_EVENT(intel_engine_notify,
TP_PROTO(struct intel_engine_cs *engine, bool waiters),
TP_ARGS(engine, waiters),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
__entry->ring = engine->id;
__entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
__entry->dev, __entry->ring, __entry->seqno,
__entry->waiters)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
@@ -599,20 +754,17 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_ARGS(req)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
__field(bool, blocking)
__field(u32, global)
__field(unsigned int, flags)
),
/* NB: the blocking information is racy since mutex_is_locked
@@ -624,14 +776,16 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->seqno = req->global_seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
__entry->ctx = req->ctx->hw_id;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
__entry->flags = flags;
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring,
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
__entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
@@ -769,17 +923,19 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
__field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ctx, __entry->vm)
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
__entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
)
DEFINE_EVENT(i915_context, i915_context_create,

View File
View File

@ -25,6 +25,17 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
#else
#define add_overflows(A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
a + b < a; \
})
#endif
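A minimal usage sketch for the helper above (function and parameter names hypothetical, not part of the patch); with unsigned operands both variants report whether the sum wraps:

static int reserve_entries(u32 count, u32 extra)
{
        /* e.g. add_overflows(0xffffffffu, 1u) is true, add_overflows(0xfffffffeu, 1u) is false */
        if (add_overflows(count, extra))
                return -EINVAL;

        return 0;
}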
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \

View File

@ -179,7 +179,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
unsigned long ggtt_end = ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
@ -202,8 +202,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt->base.start ||
mappable_end > ggtt->mappable_end ||
if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
@ -231,9 +230,9 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
}
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt->base.start) {
if (mappable_base) {
ret = vgt_balloon_space(ggtt, &bl_info.space[0],
ggtt->base.start, mappable_base);
0, mappable_base);
if (ret)
goto err;

View File

@ -78,6 +78,9 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
int i;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@ -238,7 +241,15 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 vma_flags;
int ret;
if (WARN_ON(flags == 0))
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
if (GEM_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total)))
return -ENODEV;
if (GEM_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
@ -255,20 +266,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
if (GEM_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total)))
return -ENODEV;
if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
if (ret)
return ret;
}
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
@ -324,8 +321,8 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
__i915_gem_object_release_unless_active(obj);
}
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
if (!drm_mm_node_allocated(&vma->node))
return false;
@ -512,10 +509,36 @@ err_unpin:
return ret;
}
static void
i915_vma_remove(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
if (--obj->bind_count == 0)
list_move_tail(&obj->global_link,
&to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
unsigned int bound = vma->flags;
const unsigned int bound = vma->flags;
int ret;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@ -524,18 +547,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
ret = -EBUSY;
goto err;
goto err_unpin;
}
if ((bound & I915_VMA_BIND_MASK) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags);
if (ret)
goto err;
goto err_unpin;
}
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
goto err;
goto err_remove;
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
@ -544,7 +567,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
err:
err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
GEM_BUG_ON(vma->pages);
i915_vma_remove(vma);
}
err_unpin:
__i915_vma_unpin(vma);
return ret;
}
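The split error labels keep the unwind symmetric with the setup: the node inserted by i915_vma_insert() is torn down by i915_vma_remove() only when this call was the one that inserted it. A hedged sketch of the usual caller pairing (flags illustrative):

        ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (ret)
                return ret;

        /* ... vma->node is valid and bound here ... */

        i915_vma_unpin(vma);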
@ -657,9 +685,6 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages);
@ -667,18 +692,7 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->pages = NULL;
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (--obj->bind_count == 0)
list_move_tail(&obj->global_link,
&to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
i915_vma_remove(vma);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
@ -687,3 +701,6 @@ destroy:
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

View File

@ -228,8 +228,8 @@ i915_vma_compare(struct i915_vma *vma,
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);

View File

@ -99,6 +99,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
@ -121,7 +122,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev: DRM device
* @dev_priv: i915 device
* @crtc: intel crtc
* @crtc_state: incoming crtc_state to validate and setup scalers
*
@ -136,9 +137,9 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
* 0 - scalers were setup successfully
* error code - otherwise
*/
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
@ -199,7 +200,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
*/
if (!plane) {
struct drm_plane_state *state;
plane = drm_plane_from_index(dev, i);
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
@ -247,7 +248,9 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
}
/* set scaler mode */
if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
if (IS_GEMINILAKE(dev_priv)) {
scaler_state->scalers[*scaler_id].mode = 0;
} else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*
* when only 1 scaler is in use on either pipe A or B,
* scaler 0 operates in high quality (HQ) mode.

View File

@ -189,6 +189,12 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
/* FIXME pre-g4x don't work like this */
if (intel_state->base.visible)
crtc_state->active_planes |= BIT(intel_plane->id);
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
@ -225,12 +231,19 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
if (intel_state->base.visible)
if (intel_state->base.visible) {
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
intel_state);
else
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
intel_plane->disable_plane(plane, crtc);
}
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {

View File

@ -720,7 +720,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
if (!IS_GEN9_BC(dev_priv))
return;
i915_audio_component_get_power(kdev);
@ -752,7 +752,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
return dev_priv->cdclk_freq;
return dev_priv->cdclk.hw.cdclk;
}
/*

View File

@ -27,22 +27,107 @@
#include "i915_drv.h"
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
struct intel_wait *wait;
unsigned int result = 0;
lockdep_assert_held(&b->irq_lock);
wait = b->irq_wait;
if (wait) {
result = ENGINE_WAKEUP_WAITER;
if (wake_up_process(wait->tsk))
result |= ENGINE_WAKEUP_ASLEEP;
}
return result;
}
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
unsigned int result;
spin_lock_irqsave(&b->irq_lock, flags);
result = __intel_breadcrumbs_wakeup(b);
spin_unlock_irqrestore(&b->irq_lock, flags);
return result;
}
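The return value is a small bitmask; a sketch of how a caller can tell the two interesting cases apart (this mirrors the hangcheck logic further down):

        unsigned int wakeup = intel_engine_wakeup(engine);

        if (wakeup & ENGINE_WAKEUP_ASLEEP) {
                /* a waiter existed and had to be woken: it may have
                 * missed an interrupt
                 */
        } else if (wakeup & ENGINE_WAKEUP_WAITER) {
                /* a waiter exists but was already runnable */
        }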
static unsigned long wait_timeout(void)
{
return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
engine->name, __builtin_return_address(0),
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted)));
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_enabled)
if (!b->irq_armed)
return;
if (time_before(jiffies, b->timeout)) {
mod_timer(&b->hangcheck, b->timeout);
if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
b->hangcheck_interrupts = atomic_read(&engine->irq_count);
mod_timer(&b->hangcheck, wait_timeout());
return;
}
DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
/* We keep the hangcheck time alive until we disarm the irq, even
* if there are no waiters at present.
*
* If the waiter was currently running, assume it hasn't had a chance
* to process the pending interrupt (e.g, low priority task on a loaded
* system) and wait until it sleeps before declaring a missed interrupt.
*
* If the waiter was asleep (and not even pending a wakeup), then we
* must have missed an interrupt as the GPU has stopped advancing
* but we still have a waiter. Assuming all batches complete within
* DRM_I915_HANGCHECK_JIFFIES [1.5s]!
*/
if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
missed_breadcrumb(engine);
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
} else {
mod_timer(&b->hangcheck, wait_timeout());
}
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
/*
* The timer persists in case we cannot enable interrupts,
* or if we have previously seen seqno/interrupt incoherency
* ("missed interrupt" syndrome). Here the worker will wake up
* every jiffie in order to kick the oldest waiter to do the
* coherent seqno check.
*/
spin_lock_irqsave(&b->irq_lock, flags);
if (!__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irqrestore(&b->irq_lock, flags);
if (!b->irq_armed)
return;
mod_timer(&b->fake_irq, jiffies + 1);
/* Ensure that even if the GPU hangs, we get woken up.
*
@ -56,33 +141,13 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
i915_queue_hangcheck(engine->i915);
}
static unsigned long wait_timeout(void)
{
return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
/*
* The timer persists in case we cannot enable interrupts,
* or if we have previously seen seqno/interrupt incoherency
* ("missed interrupt" syndrome). Here the worker will wake up
* every jiffie in order to kick the oldest waiter to do the
* coherent seqno check.
*/
if (intel_engine_wakeup(engine))
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
static void irq_enable(struct intel_engine_cs *engine)
{
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
*/
engine->breadcrumbs.irq_posted = true;
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
@ -96,8 +161,68 @@ static void irq_disable(struct intel_engine_cs *engine)
spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
spin_unlock(&engine->i915->irq_lock);
}
engine->breadcrumbs.irq_posted = false;
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
lockdep_assert_held(&b->irq_lock);
if (b->irq_enabled) {
irq_disable(engine);
b->irq_enabled = false;
}
b->irq_armed = false;
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
if (!b->irq_armed)
return;
spin_lock_irqsave(&b->irq_lock, flags);
/* We only disarm the irq when we are idle (all requests completed),
* so if there remains a sleeping waiter, it missed the request
* completion.
*/
if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
missed_breadcrumb(engine);
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irqrestore(&b->irq_lock, flags);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
const struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
return false;
/* Only start with the heavy weight fake irq timer if we have not
* seen any interrupts since enabling it the first time. If the
* interrupts are still arriving, it means we made a mistake in our
* engine->seqno_barrier(), a timing error that should be transient
* and unlikely to reoccur.
*/
return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}
static void enable_fake_irq(struct intel_breadcrumbs *b)
{
/* Ensure we never sleep indefinitely */
if (!b->irq_enabled || use_fake_irq(b))
mod_timer(&b->fake_irq, jiffies + 1);
else
mod_timer(&b->hangcheck, wait_timeout());
}
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
@ -106,17 +231,35 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
container_of(b, struct intel_engine_cs, breadcrumbs);
struct drm_i915_private *i915 = engine->i915;
assert_spin_locked(&b->lock);
if (b->rpm_wakelock)
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
return;
/* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. For completeness,
* record an rpm reference for ourselves to cover the
* interrupt we unmask.
/* The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
b->irq_armed = true;
GEM_BUG_ON(b->irq_enabled);
if (I915_SELFTEST_ONLY(b->mock)) {
/* For our mock objects we want to avoid interaction
* with the real hardware (which is not set up). So
* we simply pretend we have enabled the powerwell
* and the irq, and leave it up to the mock
* implementation to call intel_engine_wakeup()
* itself when it wants to simulate a user interrupt.
*/
return;
}
/* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. This is tracked
* by i915->gt.awake, so we can forgo holding our own wakeref
* for the interrupt: before i915->gt.awake is released (when
* the driver is idle) we disarm the breadcrumbs.
*/
intel_runtime_pm_get_noresume(i915);
b->rpm_wakelock = true;
/* No interrupts? Kick the waiter every jiffie! */
if (intel_irqs_enabled(i915)) {
@ -125,32 +268,7 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
b->irq_enabled = true;
}
if (!b->irq_enabled ||
test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
mod_timer(&b->fake_irq, jiffies + 1);
} else {
/* Ensure we never sleep indefinitely */
GEM_BUG_ON(!time_after(b->timeout, jiffies));
mod_timer(&b->hangcheck, b->timeout);
}
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
assert_spin_locked(&b->lock);
if (!b->rpm_wakelock)
return;
if (b->irq_enabled) {
irq_disable(engine);
b->irq_enabled = false;
}
intel_runtime_pm_put(engine->i915);
b->rpm_wakelock = false;
enable_fake_irq(b);
}
static inline struct intel_wait *to_wait(struct rb_node *node)
@ -161,7 +279,7 @@ static inline struct intel_wait *to_wait(struct rb_node *node)
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
struct intel_wait *wait)
{
assert_spin_locked(&b->lock);
lockdep_assert_held(&b->rb_lock);
/* This request is completed, so remove it from the tree, mark it as
* complete, and *then* wake up the associated task.
@ -172,6 +290,24 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
struct rb_node *next)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
spin_lock(&b->irq_lock);
GEM_BUG_ON(!b->irq_armed);
b->irq_wait = to_wait(next);
spin_unlock(&b->irq_lock);
/* We always wake up the next waiter that takes over as the bottom-half
* as we may delegate not only the irq-seqno barrier to the next waiter
* but also the task of waking up concurrent waiters.
*/
if (next)
wake_up_process(to_wait(next)->tsk);
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
{
@ -235,7 +371,6 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
}
rb_link_node(&wait->node, parent, p);
rb_insert_color(&wait->node, &b->waiters);
GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
if (completed) {
struct rb_node *next = rb_next(completed);
@ -243,22 +378,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(!next && !first);
if (next && next != &wait->node) {
GEM_BUG_ON(first);
b->timeout = wait_timeout();
b->first_wait = to_wait(next);
rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
/* As there is a delay between reading the current
* seqno, processing the completed tasks and selecting
* the next waiter, we may have missed the interrupt
* and so need for the next bottom-half to wakeup.
*
* Also as we enable the IRQ, we may miss the
* interrupt for that seqno, so we have to wake up
* the next bottom-half in order to do a coherent check
* in case the seqno passed.
*/
__intel_breadcrumbs_enable_irq(b);
if (READ_ONCE(b->irq_posted))
wake_up_process(to_wait(next)->tsk);
__intel_breadcrumbs_next(engine, next);
}
do {
@ -269,10 +389,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
}
if (first) {
spin_lock(&b->irq_lock);
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
b->timeout = wait_timeout();
b->first_wait = wait;
rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
b->irq_wait = wait;
/* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt.
* Either we miss the interrupt whilst programming the hardware,
@ -282,10 +401,10 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
* and so we miss the wake up.
*/
__intel_breadcrumbs_enable_irq(b);
spin_unlock(&b->irq_lock);
}
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
GEM_BUG_ON(!b->first_wait);
GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
GEM_BUG_ON(!b->irq_wait);
GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
return first;
}
@ -296,9 +415,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
spin_lock_irq(&b->lock);
spin_lock_irq(&b->rb_lock);
first = __intel_engine_add_wait(engine, wait);
spin_unlock_irq(&b->lock);
spin_unlock_irq(&b->rb_lock);
return first;
}
@ -317,29 +436,20 @@ static inline int wakeup_priority(struct intel_breadcrumbs *b,
return tsk->prio;
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* Quick check to see if this waiter was already decoupled from
* the tree by the bottom-half to avoid contention on the spinlock
* by the herd.
*/
if (RB_EMPTY_NODE(&wait->node))
return;
spin_lock_irq(&b->lock);
lockdep_assert_held(&b->rb_lock);
if (RB_EMPTY_NODE(&wait->node))
goto out_unlock;
goto out;
if (b->first_wait == wait) {
if (b->irq_wait == wait) {
const int priority = wakeup_priority(b, wait->tsk);
struct rb_node *next;
GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
/* We are the current bottom-half. Find the next candidate,
* the first waiter in the queue on the remaining oldest
* request. As multiple seqnos may complete in the time it
@ -372,25 +482,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
}
}
if (next) {
/* In our haste, we may have completed the first waiter
* before we enabled the interrupt. Do so now as we
* have a second waiter for a future seqno. Afterwards,
* we have to wake up that waiter in case we missed
* the interrupt, or if we have to handle an
* exception rather than a seqno completion.
*/
b->timeout = wait_timeout();
b->first_wait = to_wait(next);
rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
if (b->first_wait->seqno != wait->seqno)
__intel_breadcrumbs_enable_irq(b);
wake_up_process(b->first_wait->tsk);
} else {
b->first_wait = NULL;
rcu_assign_pointer(b->irq_seqno_bh, NULL);
__intel_breadcrumbs_disable_irq(b);
}
__intel_breadcrumbs_next(engine, next);
} else {
GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
}
@ -398,15 +490,35 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
out_unlock:
GEM_BUG_ON(b->first_wait == wait);
out:
GEM_BUG_ON(b->irq_wait == wait);
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
spin_unlock_irq(&b->lock);
(b->irq_wait ? &b->irq_wait->node : NULL));
}
static bool signal_complete(struct drm_i915_gem_request *request)
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* Quick check to see if this waiter was already decoupled from
* the tree by the bottom-half to avoid contention on the spinlock
* by the herd.
*/
if (RB_EMPTY_NODE(&wait->node))
return;
spin_lock_irq(&b->rb_lock);
__intel_engine_remove_wait(engine, wait);
spin_unlock_irq(&b->rb_lock);
}
static bool signal_valid(const struct drm_i915_gem_request *request)
{
return intel_wait_check_request(&request->signaling.wait, request);
}
static bool signal_complete(const struct drm_i915_gem_request *request)
{
if (!request)
return false;
@ -415,7 +527,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
* signalled that this wait is already completed.
*/
if (intel_wait_complete(&request->signaling.wait))
return true;
return signal_valid(request);
/* Carefully check if the request is complete, giving time for the
* seqno to be visible or if the GPU hung.
@ -458,40 +570,62 @@ static int intel_breadcrumbs_signaler(void *arg)
* need to wait for a new interrupt from the GPU or for
* a new client.
*/
request = READ_ONCE(b->first_signal);
rcu_read_lock();
request = rcu_dereference(b->first_signal);
if (request)
request = i915_gem_request_get_rcu(request);
rcu_read_unlock();
if (signal_complete(request)) {
/* Wake up all other completed waiters and select the
* next bottom-half for the next user interrupt.
*/
intel_engine_remove_wait(engine,
&request->signaling.wait);
local_bh_disable();
dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
spin_lock_irq(&b->rb_lock);
/* Wake up all other completed waiters and select the
* next bottom-half for the next user interrupt.
*/
__intel_engine_remove_wait(engine,
&request->signaling.wait);
/* Find the next oldest signal. Note that as we have
* not been holding the lock, another client may
* have installed an even older signal than the one
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
spin_lock_irq(&b->lock);
if (request == b->first_signal) {
if (request == rcu_access_pointer(b->first_signal)) {
struct rb_node *rb =
rb_next(&request->signaling.node);
b->first_signal = rb ? to_signaler(rb) : NULL;
rcu_assign_pointer(b->first_signal,
rb ? to_signaler(rb) : NULL);
}
rb_erase(&request->signaling.node, &b->signals);
spin_unlock_irq(&b->lock);
RB_CLEAR_NODE(&request->signaling.node);
spin_unlock_irq(&b->rb_lock);
i915_gem_request_put(request);
} else {
if (kthread_should_stop())
DEFINE_WAIT(exec);
if (kthread_should_stop()) {
GEM_BUG_ON(request);
break;
}
if (request)
add_wait_queue(&request->execute, &exec);
schedule();
if (request)
remove_wait_queue(&request->execute, &exec);
if (kthread_should_park())
kthread_parkme();
}
i915_gem_request_put(request);
} while (1);
__set_current_state(TASK_RUNNING);
@ -504,6 +638,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *parent, **p;
bool first, wakeup;
u32 seqno;
/* Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
@ -513,15 +648,19 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
*/
/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
assert_spin_locked(&request->lock);
if (!request->global_seqno)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
seqno = i915_gem_request_global_seqno(request);
if (!seqno)
return;
request->signaling.wait.tsk = b->signaler;
request->signaling.wait.seqno = request->global_seqno;
request->signaling.wait.request = request;
request->signaling.wait.seqno = seqno;
i915_gem_request_get(request);
spin_lock(&b->lock);
spin_lock(&b->rb_lock);
/* First add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
@ -542,8 +681,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
if (i915_seqno_passed(request->global_seqno,
to_signaler(parent)->global_seqno)) {
if (i915_seqno_passed(seqno,
to_signaler(parent)->signaling.wait.seqno)) {
p = &parent->rb_right;
first = false;
} else {
@ -553,20 +692,52 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
rb_link_node(&request->signaling.node, parent, p);
rb_insert_color(&request->signaling.node, &b->signals);
if (first)
smp_store_mb(b->first_signal, request);
rcu_assign_pointer(b->first_signal, request);
spin_unlock(&b->lock);
spin_unlock(&b->rb_lock);
if (wakeup)
wake_up_process(b->signaler);
}
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
GEM_BUG_ON(!request->signaling.wait.seqno);
spin_lock(&b->rb_lock);
if (!RB_EMPTY_NODE(&request->signaling.node)) {
if (request == rcu_access_pointer(b->first_signal)) {
struct rb_node *rb =
rb_next(&request->signaling.node);
rcu_assign_pointer(b->first_signal,
rb ? to_signaler(rb) : NULL);
}
rb_erase(&request->signaling.node, &b->signals);
RB_CLEAR_NODE(&request->signaling.node);
i915_gem_request_put(request);
}
__intel_engine_remove_wait(engine, &request->signaling.wait);
spin_unlock(&b->rb_lock);
request->signaling.wait.seqno = 0;
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct task_struct *tsk;
spin_lock_init(&b->lock);
spin_lock_init(&b->rb_lock);
spin_lock_init(&b->irq_lock);
setup_timer(&b->fake_irq,
intel_breadcrumbs_fake_irq,
(unsigned long)engine);
@ -604,20 +775,26 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
spin_lock_irq(&b->lock);
spin_lock_irq(&b->irq_lock);
__intel_breadcrumbs_disable_irq(b);
if (intel_engine_has_waiter(engine)) {
b->timeout = wait_timeout();
__intel_breadcrumbs_enable_irq(b);
if (READ_ONCE(b->irq_posted))
wake_up_process(b->first_wait->tsk);
} else {
/* sanitize the IMR and unmask any auxiliary interrupts */
if (b->irq_enabled)
irq_enable(engine);
else
irq_disable(engine);
}
spin_unlock_irq(&b->lock);
/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
* GPU is active and may have already executed the MI_USER_INTERRUPT
* before the CPU is ready to receive. However, the engine is currently
* idle (we haven't started it yet), there is no possibility for a
* missed interrupt as we enabled the irq and so we can clear the
* immediate wakeup (until a real interrupt arrives for the waiter).
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
if (b->irq_armed)
enable_fake_irq(b);
spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@ -625,9 +802,9 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The engines should be idle and all requests accounted for! */
WARN_ON(READ_ONCE(b->first_wait));
WARN_ON(READ_ONCE(b->irq_wait));
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
WARN_ON(READ_ONCE(b->first_signal));
WARN_ON(rcu_access_pointer(b->first_signal));
WARN_ON(!RB_EMPTY_ROOT(&b->signals));
if (!IS_ERR_OR_NULL(b->signaler))
@ -636,29 +813,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool busy = false;
for_each_engine(engine, i915, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
spin_lock_irq(&b->rb_lock);
spin_lock_irq(&b->lock);
if (b->first_wait) {
wake_up_process(b->first_wait->tsk);
mask |= intel_engine_flag(engine);
}
if (b->first_signal) {
wake_up_process(b->signaler);
mask |= intel_engine_flag(engine);
}
spin_unlock_irq(&b->lock);
if (b->irq_wait) {
wake_up_process(b->irq_wait->tsk);
busy |= intel_engine_flag(engine);
}
return mask;
if (rcu_access_pointer(b->first_signal)) {
wake_up_process(b->signaler);
busy |= intel_engine_flag(engine);
}
spin_unlock_irq(&b->rb_lock);
return busy;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif

File diff not shown because of its large size. Load diff

View File

@ -340,20 +340,12 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
hsw_enable_ips(intel_crtc);
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc_state *state)
static void bdw_load_degamma_lut(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
return;
}
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
@ -377,6 +369,20 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
(v << 20) | (v << 10) | v);
}
}
}
static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
I915_WRITE(PREC_PAL_INDEX(pipe),
(offset ? PAL_PREC_SPLIT_MODE : 0) |
PAL_PREC_AUTO_INCREMENT |
offset);
if (state->gamma_lut) {
struct drm_color_lut *lut =
@ -410,6 +416,23 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
}
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
return;
}
bdw_load_degamma_lut(state);
bdw_load_gamma_lut(state,
INTEL_INFO(dev_priv)->color.degamma_lut_size);
intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
@ -422,6 +445,58 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
static void glk_load_degamma_lut(struct drm_crtc_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
const uint32_t lut_size = 33;
uint32_t i;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
/*
* FIXME: The pipe degamma table in geminilake doesn't support
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
uint32_t v = (i * ((1 << 16) - 1)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16) - 1);
}
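Sanity-checking the ramp above (pure arithmetic, nothing driver-specific): with lut_size = 33 the loop programs v = i * 65535 / 32, so i = 0 writes 0, i = 16 writes 32767 and i = 32 writes 65535; the trailing while loop then writes 65535 into the two remaining entries (indices 33 and 34) to clamp inputs above 1.0.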
static void glk_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
glk_load_degamma_lut(state);
if (crtc_state_is_legacy(state)) {
haswell_load_luts(state);
return;
}
bdw_load_gamma_lut(state, 0);
intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
POSTING_READ(GAMMA_MODE(pipe));
}
/* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc_state *state)
{
@ -536,10 +611,13 @@ void intel_color_init(struct drm_crtc *crtc)
} else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
} else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
IS_BROXTON(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else if (IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
}

View File

@ -69,12 +69,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
@ -91,7 +90,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@ -676,7 +675,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
@ -689,8 +687,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_encoder->power_domain);
if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
@ -745,7 +742,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
drm_modeset_acquire_fini(&ctx);
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain);
return status;
}
@ -761,12 +758,10 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_encoder->power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
@ -778,7 +773,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain);
return ret;
}
@ -904,6 +899,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->adpa_reg = adpa_reg;
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;

View File

@ -34,9 +34,9 @@
* low-power state and comes back to normal.
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
#define I915_CSR_GLK "i915/glk_dmc_ver1_03.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 3)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
@ -396,13 +396,11 @@ static void csr_load_work_fn(struct work_struct *work)
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
const struct firmware *fw = NULL;
int ret;
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
ret = request_firmware(&fw, dev_priv->csr.fw_path,
&dev_priv->drm.pdev->dev);
request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
if (fw)
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);

View File

@ -34,6 +34,19 @@ struct ddi_buf_trans {
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};
static const u8 index_to_dp_signal_levels[] = {
[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1,
[2] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2,
[3] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3,
[4] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0,
[5] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1,
[6] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2,
[7] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0,
[8] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1,
[9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,
};
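This table is the inverse of translate_signal_level() further down; as a sketch, a table-driven replacement for that switch could look like the following (illustrative only, the patch keeps the switch):

static u32 signal_levels_to_index(u8 signal_levels)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++)
                if (index_to_dp_signal_levels[i] == signal_levels)
                        return i;

        /* unsupported swing/pre-emphasis combination: fall back to 0 */
        return 0;
}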
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
@ -445,7 +458,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
if (IS_GEN9_LP(dev_priv))
return hdmi_level;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
@ -468,6 +481,59 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
return hdmi_level;
}
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_KABYLAKE(dev_priv)) {
return kbl_get_buf_trans_dp(dev_priv, n_entries);
} else if (IS_SKYLAKE(dev_priv)) {
return skl_get_buf_trans_dp(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
return bdw_ddi_translations_dp;
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
return hsw_ddi_translations_dp;
}
*n_entries = 0;
return NULL;
}
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) {
return skl_get_buf_trans_edp(dev_priv, n_entries);
} else if (IS_BROADWELL(dev_priv)) {
return bdw_get_buf_trans_edp(dev_priv, n_entries);
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
return hsw_ddi_translations_dp;
}
*n_entries = 0;
return NULL;
}
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
return hsw_ddi_translations_fdi;
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
return hsw_ddi_translations_fdi;
}
*n_entries = 0;
return NULL;
}
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@ -477,76 +543,43 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_dp_entries, n_edp_entries, size;
int i, n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations;
if (IS_GEN9_LP(dev_priv))
return;
if (IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
} else if (IS_SKYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
&n_entries);
break;
case INTEL_OUTPUT_DP:
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv,
&n_entries);
break;
case INTEL_OUTPUT_ANALOG:
ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
&n_entries);
break;
default:
MISSING_CASE(encoder->type);
return;
}
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
n_entries > 9))
n_entries = 9;
}
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
case INTEL_OUTPUT_DP:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
case INTEL_OUTPUT_ANALOG:
ddi_translations = ddi_translations_fdi;
size = n_dp_entries;
break;
default:
BUG();
}
for (i = 0; i < size; i++) {
for (i = 0; i < n_entries; i++) {
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations[i].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
@ -572,7 +605,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
/* If we're boosting the current, set bit 31 of trans1 */
@ -641,15 +674,15 @@ static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
* DDI A (which is used for eDP)
*/
void hsw_fdi_link_train(struct drm_crtc *crtc)
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val, ddi_pll_sel;
for_each_encoder_on_crtc(dev, crtc, encoder) {
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_dp_ddi_buffers(encoder);
}
@ -668,7 +701,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
udelay(220);
@ -678,7 +711,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
@ -698,7 +731,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->config->fdi_lanes - 1) << 1) |
((crtc_state->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
@ -785,21 +818,20 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
}
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *intel_encoder, *ret = NULL;
int num_encoders = 0;
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
for_each_encoder_on_crtc(dev, &crtc->base, intel_encoder) {
ret = intel_encoder;
num_encoders++;
}
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
@ -1089,7 +1121,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
else if (IS_GEN9_BC(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
@ -1150,7 +1182,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else if (IS_GEN9_LP(dev_priv))
@ -1161,12 +1193,12 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
intel_encoder);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
@ -1174,7 +1206,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config->pipe_bpp) {
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@ -1194,12 +1226,12 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@ -1209,14 +1241,13 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@ -1225,7 +1256,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
switch (intel_crtc->config->pipe_bpp) {
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@ -1242,9 +1273,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@ -1255,8 +1286,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev_priv) &&
(intel_crtc->config->pch_pfit.enabled ||
intel_crtc->config->pch_pfit.force_thru))
(crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@ -1274,20 +1305,20 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
if (intel_crtc->config->has_hdmi_sink)
if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
@ -1316,12 +1347,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain))
return false;
if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
@ -1363,7 +1393,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_encoder->power_domain);
return ret;
}
@ -1374,13 +1404,12 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
@ -1437,29 +1466,39 @@ out:
"(PHY_CTL %08x)\n", port_name(port), tmp);
}
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum pipe pipe;
if (intel_ddi_get_hw_state(encoder, &pipe))
return BIT_ULL(dig_port->ddi_io_power_domain);
return 0;
}
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
}
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@ -1582,50 +1621,38 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
ddi_translations[level].deemphasis);
}
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
if (encoder->type == INTEL_OUTPUT_EDP)
intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
else
intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
if (WARN_ON(n_entries < 1))
n_entries = 1;
if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
return index_to_dp_signal_levels[n_entries - 1] &
DP_TRAIN_VOLTAGE_SWING_MASK;
}
static uint32_t translate_signal_level(int signal_levels)
{
uint32_t level;
int i;
switch (signal_levels) {
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 0;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 1;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
level = 2;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
level = 3;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 4;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 5;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
level = 6;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 7;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
level = 8;
break;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
level = 9;
break;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
if (index_to_dp_signal_levels[i] == signal_levels)
return i;
}
return level;
WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
return 0;
}
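
The removed switch statement fully determines what the new lookup table must contain, since translate_signal_level() now simply returns the array index. For reference, a sketch of index_to_dp_signal_levels consistent with the old mapping (the real table is defined earlier in intel_ddi.c; treat this as illustrative):

static const int index_to_dp_signal_levels[] = {
	DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,	/* level 0 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1,	/* level 1 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2,	/* level 2 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3,	/* level 3 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0,	/* level 4 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1,	/* level 5 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2,	/* level 6 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0,	/* level 7 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1,	/* level 8 */
	DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,	/* level 9 */
};

With this ordering, intel_ddi_dp_voltage_max() above can pick the last valid entry and mask out everything but the voltage swing bits.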
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
@ -1641,7 +1668,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
@ -1658,7 +1685,7 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
uint32_t val;
/* DDI -> PLL mapping */
@ -1684,6 +1711,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
intel_dp_set_link_params(intel_dp, link_rate, lane_count,
link_mst);
@ -1691,6 +1721,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, pll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
intel_prepare_dp_ddi_buffers(encoder);
intel_ddi_init_dp_buf_reg(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@ -1710,11 +1743,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct drm_encoder *drm_encoder = &encoder->base;
enum port port = intel_ddi_get_encoder_port(encoder);
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, pll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
intel_prepare_hdmi_ddi_buffers(encoder);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
@ -1729,23 +1766,21 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
int type = intel_encoder->type;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
intel_ddi_pre_enable_dp(intel_encoder,
crtc->config->port_clock,
crtc->config->lane_count,
crtc->config->shared_dpll,
intel_crtc_has_type(crtc->config,
pipe_config->port_clock,
pipe_config->lane_count,
pipe_config->shared_dpll,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(intel_encoder,
pipe_config->has_hdmi_sink,
pipe_config, conn_state,
crtc->config->shared_dpll);
pipe_config->shared_dpll);
}
}
@ -1756,6 +1791,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
int type = intel_encoder->type;
uint32_t val;
bool wait = false;
@ -1784,7 +1820,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (dig_port)
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
if (IS_GEN9_BC(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_GEN(dev_priv) < 9)
@ -1835,8 +1874,6 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@ -1863,10 +1900,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
intel_edp_drrs_enable(intel_dp, pipe_config);
}
if (intel_crtc->config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (pipe_config->has_audio)
intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
}
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder,
@ -1874,16 +1909,10 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (intel_crtc->config->has_audio) {
if (old_crtc_state->has_audio)
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -1898,8 +1927,7 @@ static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
uint8_t mask = pipe_config->lane_lat_optim_mask;
bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
@ -2126,45 +2154,6 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
struct intel_shared_dpll *
intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = connector->encoder;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_shared_dpll *pll = NULL;
struct intel_shared_dpll_state tmp_pll_state;
enum intel_dpll_id dpll_id;
if (IS_GEN9_LP(dev_priv)) {
dpll_id = (enum intel_dpll_id)dig_port->port;
/*
* Select the required PLL. This works for platforms where
* there is no shared DPLL.
*/
pll = &dev_priv->shared_dplls[dpll_id];
if (WARN_ON(pll->active_mask)) {
DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
pll->active_mask);
return NULL;
}
tmp_pll_state = pll->state;
if (!bxt_ddi_dp_set_dpll_hw_state(clock,
&pll->state.hw_state)) {
DRM_ERROR("Could not setup DPLL\n");
pll->state = tmp_pll_state;
return NULL;
}
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
pll = skl_find_link_pll(dev_priv, clock);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
pll = hsw_ddi_dp_get_dpll(encoder, clock);
}
return pll;
}
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
@ -2241,12 +2230,38 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
switch (port) {
case PORT_A:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_A_IO;
break;
case PORT_B:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_B_IO;
break;
case PORT_C:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_C_IO;
break;
case PORT_D:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_D_IO;
break;
case PORT_E:
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_E_IO;
break;
default:
MISSING_CASE(port);
}
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
@ -2265,6 +2280,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@ -2274,14 +2290,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in



@ -56,6 +56,8 @@ static const char * const platform_names[] = {
const char *intel_platform_name(enum intel_platform platform)
{
BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);
if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
platform_names[platform] == NULL))
return "<unknown>";
@ -234,7 +236,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
sseu->subslice_mask = BIT(ss_max) - 1;
sseu->subslice_mask = GENMASK(ss_max - 1, 0);
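/* e.g. ss_max == 3: GENMASK(2, 0) == 0b111 == BIT(3) - 1 */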
sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
GEN8_F2_SS_DIS_SHIFT);
@ -410,10 +412,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->has_snoop = !info->has_llc;
/* Snooping is broken on BXT A stepping. */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
info->has_snoop = false;
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
DRM_DEBUG_DRIVER("subslice total: %u\n",

[File diff not shown because of its large size.]


@ -28,8 +28,10 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@ -226,7 +228,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
if (IS_GEN9_LP(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
} else if (IS_GEN9_BC(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
@ -394,14 +396,12 @@ static void pps_lock(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
/*
* See vlv_power_sequencer_reset() why we need
* a power domain reference here.
*/
power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
mutex_lock(&dev_priv->pps_mutex);
}
@ -412,12 +412,10 @@ static void pps_unlock(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
mutex_unlock(&dev_priv->pps_mutex);
power_domain = intel_display_port_aux_power_domain(encoder);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
static void
@ -916,7 +914,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
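/* e.g. with cdclk at 540000 kHz: DIV_ROUND_CLOSEST(540000, 2000) == 270 */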
if (intel_dig_port->port == PORT_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
@ -1593,6 +1591,13 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
if (bpc > 0)
bpp = min(bpp, 3*bpc);
/* For DP Compliance we override the computed bpp for the pipe */
if (intel_dp->compliance.test_data.bpc != 0) {
pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
pipe_config->pipe_bpp);
}
return bpp;
}
@ -1613,6 +1618,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Conveniently, the link BW constants become indices with a shift... */
int min_clock = 0;
int max_clock;
int link_rate_index;
int bpp, mode_rate;
int link_avail, link_clock;
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
@ -1654,6 +1660,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
/* Use values requested by Compliance Test Request */
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
link_rate_index = intel_dp_link_rate_index(intel_dp,
common_rates,
intel_dp->compliance.test_link_rate);
if (link_rate_index >= 0)
min_clock = max_clock = link_rate_index;
min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
max_lane_count, common_rates[max_clock],
@ -1753,8 +1768,7 @@ found:
* DPLL0 VCO may need to be adjusted to get the correct
* clock for eDP. This will affect cdclk as well.
*/
if (is_edp(intel_dp) &&
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
int vco;
switch (pipe_config->port_clock / 2) {
@ -1767,7 +1781,7 @@ found:
break;
}
to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
}
if (!HAS_DDI(dev_priv))
@ -1987,9 +2001,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@ -2005,8 +2017,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->port));
@ -2064,8 +2075,6 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@ -2095,8 +2104,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
static void edp_panel_vdd_work(struct work_struct *__work)
@ -2209,11 +2217,8 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_ctrl_reg;
@ -2245,8 +2250,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
@ -2492,12 +2496,11 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
@ -2533,7 +2536,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@ -3080,9 +3083,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
if (IS_GEN9_LP(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
return intel_ddi_dp_voltage_max(encoder);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev_priv) && port == PORT_A)
@ -3922,19 +3924,112 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_ACK;
return test_result;
int status = 0;
int min_lane_count = 1;
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int link_rate_index, test_link_rate;
uint8_t test_lane_count, test_link_bw;
/* DP CTS 1.2, section 4.3.1.11 */
/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
&test_lane_count);
if (status <= 0) {
DRM_DEBUG_KMS("Lane count read failed\n");
return DP_TEST_NAK;
}
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
/* Validate the requested lane count */
if (test_lane_count < min_lane_count ||
test_lane_count > intel_dp->max_sink_lane_count)
return DP_TEST_NAK;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
&test_link_bw);
if (status <= 0) {
DRM_DEBUG_KMS("Link Rate read failed\n");
return DP_TEST_NAK;
}
/* Validate the requested link rate */
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
link_rate_index = intel_dp_link_rate_index(intel_dp,
common_rates,
test_link_rate);
if (link_rate_index < 0)
return DP_TEST_NAK;
intel_dp->compliance.test_lane_count = test_lane_count;
intel_dp->compliance.test_link_rate = test_link_rate;
return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
return test_result;
uint8_t test_pattern;
uint8_t test_misc;
__be16 h_width, v_height;
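/* DPCD stores these as big-endian, high byte first, hence __be16 and be16_to_cpu() below */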
int status = 0;
/* Read the TEST_PATTERN (DP CTS 3.1.5) */
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
&test_pattern, 1);
if (status <= 0) {
DRM_DEBUG_KMS("Test pattern read failed\n");
return DP_TEST_NAK;
}
if (test_pattern != DP_COLOR_RAMP)
return DP_TEST_NAK;
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
&h_width, 2);
if (status <= 0) {
DRM_DEBUG_KMS("H Width read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
&v_height, 2);
if (status <= 0) {
DRM_DEBUG_KMS("V Height read failed\n");
return DP_TEST_NAK;
}
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
&test_misc, 1);
if (status <= 0) {
DRM_DEBUG_KMS("TEST MISC read failed\n");
return DP_TEST_NAK;
}
if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
return DP_TEST_NAK;
if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
return DP_TEST_NAK;
switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
case DP_TEST_BIT_DEPTH_6:
intel_dp->compliance.test_data.bpc = 6;
break;
case DP_TEST_BIT_DEPTH_8:
intel_dp->compliance.test_data.bpc = 8;
break;
default:
return DP_TEST_NAK;
}
intel_dp->compliance.test_data.video_pattern = test_pattern;
intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
/* Set test active flag here so userspace doesn't interrupt things */
intel_dp->compliance.test_active = 1;
return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
uint8_t test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@ -3969,7 +4064,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
}
/* Set test active flag here so userspace doesn't interrupt things */
@ -3987,45 +4082,42 @@ static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
uint8_t response = DP_TEST_NAK;
uint8_t rxdata = 0;
int status = 0;
uint8_t request = 0;
int status;
status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
if (status <= 0) {
DRM_DEBUG_KMS("Could not read test request from sink\n");
goto update_status;
}
switch (rxdata) {
switch (request) {
case DP_TEST_LINK_TRAINING:
DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING;
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN;
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
DRM_DEBUG_KMS("EDID test requested\n");
intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ;
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
break;
}
if (response & DP_TEST_ACK)
intel_dp->compliance.test_type = request;
update_status:
status = drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_RESPONSE,
&response, 1);
status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
if (status <= 0)
DRM_DEBUG_KMS("Could not write test response to sink\n");
}
@ -4137,9 +4229,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!intel_dp->lane_count)
return;
/* if link training is requested we should perform it always */
if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
/* Retrain if Channel EQ or CR not ok */
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
@ -4164,6 +4255,7 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@ -4197,7 +4289,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
@ -4205,6 +4297,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
drm_kms_helper_hotplug_event(intel_encoder->base.dev);
}
return true;
}
@ -4213,9 +4310,13 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
uint8_t *dpcd = intel_dp->dpcd;
uint8_t type;
if (lspcon->active)
lspcon_resume(lspcon);
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
@ -4474,11 +4575,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
u8 sink_irq_vector = 0;
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain);
intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
@ -4511,11 +4610,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
/* Set the max lane count for sink */
intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
if (intel_dp->reset_link_params) {
/* Set the max lane count for sink */
intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
/* Set the max link BW for sink */
intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
/* Set the max link BW for sink */
intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
intel_dp->reset_link_params = false;
}
intel_dp_print_rates(intel_dp);
@ -4575,7 +4678,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
intel_display_power_put(to_i915(dev), power_domain);
intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
return status;
}
@ -4603,7 +4706,6 @@ intel_dp_force(struct drm_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain power_domain;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@ -4612,12 +4714,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
intel_dp_set_edid(intel_dp);
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
@ -4852,7 +4953,6 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
lockdep_assert_held(&dev_priv->pps_mutex);
@ -4866,8 +4966,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
edp_panel_vdd_schedule_off(intel_dp);
}
@ -4897,6 +4996,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (lspcon->active)
lspcon_resume(lspcon);
intel_dp->reset_link_params = true;
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@ -4939,10 +5040,8 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
@ -4966,12 +5065,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
long_hpd ? "long" : "short");
if (long_hpd) {
intel_dp->reset_link_params = true;
intel_dp->detect_done = false;
return IRQ_NONE;
}
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@ -4999,7 +5098,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
ret = IRQ_HANDLED;
put_power:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
return ret;
}
@ -5790,6 +5889,41 @@ out_vdd_off:
return false;
}
/* Set up the hotplug pin and aux power domain. */
static void
intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
{
struct intel_encoder *encoder = &intel_dig_port->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
switch (intel_dig_port->port) {
case PORT_A:
encoder->hpd_pin = HPD_PORT_A;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
break;
case PORT_B:
encoder->hpd_pin = HPD_PORT_B;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
break;
case PORT_C:
encoder->hpd_pin = HPD_PORT_C;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
break;
case PORT_D:
encoder->hpd_pin = HPD_PORT_D;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
case PORT_E:
encoder->hpd_pin = HPD_PORT_E;
/* FIXME: Check VBT for actual wiring of PORT E */
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
default:
MISSING_CASE(intel_dig_port->port);
}
}
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
@ -5807,6 +5941,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dig_port->max_lanes, port_name(port)))
return false;
intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
@ -5863,6 +5998,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
intel_dp_init_connector_port_info(intel_dig_port);
intel_dp_aux_init(intel_dp);
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
@ -5875,29 +6012,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* Set up the hotplug pin. */
switch (port) {
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
intel_encoder->hpd_pin = HPD_PORT_E;
break;
default:
BUG();
}
/* init MST on ports that can support it */
if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
@ -5982,6 +6096,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;


@ -47,6 +47,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
bpp = intel_dp->compliance.test_data.bpc * 3;
DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
bpp);
}
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
@ -55,7 +60,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
state = pipe_config->base.state;
@ -87,7 +92,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@ -98,10 +102,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio) {
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@ -157,23 +159,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
if (intel_dp->active_mst_links == 0) {
intel_ddi_clk_select(&intel_dig_port->base,
pipe_config->shared_dpll);
intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
intel_dp_set_link_params(intel_dp,
pipe_config->port_clock,
pipe_config->lane_count,
true);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
connector->port,
@ -214,10 +202,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
if (pipe_config->has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@ -548,6 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;


@ -42,44 +42,6 @@
* commit phase.
*/
struct intel_shared_dpll *
skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
{
struct intel_shared_dpll *pll = NULL;
struct intel_dpll_hw_state dpll_hw_state;
enum intel_dpll_id i;
bool found = false;
if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return pll;
for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
if (pll->state.crtc_mask == 0)
continue;
if (memcmp(&dpll_hw_state, &pll->state.hw_state,
sizeof(pll->state.hw_state)) == 0) {
found = true;
break;
}
}
/* Ok no matching timings, maybe there's a free one? */
for (i = DPLL_ID_SKL_DPLL1;
((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
pll = &dev_priv->shared_dplls[i];
if (pll->state.crtc_mask == 0) {
pll->state.hw_state = dpll_hw_state;
break;
}
}
return pll;
}
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@ -811,8 +773,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
return pll;
}
struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
int clock)
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll;
@ -1360,8 +1322,9 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
bool skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
static bool
skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
uint32_t ctrl1;
@ -1816,8 +1779,9 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
return true;
}
bool bxt_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
static bool
bxt_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct bxt_clk_div clk_div = {0};
@ -2016,7 +1980,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
dpll_mgr = &skl_pll_mgr;
else if (IS_GEN9_LP(dev_priv))
dpll_mgr = &bxt_pll_mgr;


@ -282,20 +282,4 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
/* BXT dpll related functions */
bool bxt_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state);
/* SKL dpll related functions */
bool skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state);
struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
int clock);
/* HSW dpll related functions */
struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
int clock);
#endif /* _INTEL_DPLL_MGR_H_ */


@ -242,6 +242,9 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_state *pipe_config);
/* Returns a mask of power domains that need to be referenced as part
* of the hardware state readout code. */
u64 (*get_power_domains)(struct intel_encoder *encoder);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@ -250,6 +253,7 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
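
A minimal sketch of how hardware state readout might consume the new get_power_domains() hook (illustrative only; it assumes the driver's usual for_each_intel_encoder() and for_each_power_domain() iterators):

static void readout_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum intel_display_power_domain domain;
		u64 domains;

		if (!encoder->get_power_domains)
			continue;

		/* take one reference per domain the encoder reports */
		domains = encoder->get_power_domains(encoder);
		for_each_power_domain(domain, domains)
			intel_display_power_get(dev_priv, domain);
	}
}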
@ -334,13 +338,20 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
unsigned int cdclk;
struct {
/*
* Logical state of cdclk (used for all scaling, watermark,
* etc. calculations and checks). This is computed as if all
* enabled crtcs were active.
*/
struct intel_cdclk_state logical;
/*
* Calculated device cdclk, can be different from cdclk
* only when all crtc's are DPMS off.
*/
unsigned int dev_cdclk;
/*
* Actual state of cdclk, can be different from the logical
* state only when all crtc's are DPMS off.
*/
struct intel_cdclk_state actual;
} cdclk;
bool dpll_set, modeset;
@ -357,9 +368,6 @@ struct intel_atomic_state {
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
/* SKL/KBL Only */
unsigned int cdclk_pll_vco;
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/*
@ -485,6 +493,24 @@ struct skl_pipe_wm {
uint32_t linetime;
};
enum vlv_wm_level {
VLV_WM_LEVEL_PM2,
VLV_WM_LEVEL_PM5,
VLV_WM_LEVEL_DDR_DVFS,
NUM_VLV_WM_LEVELS,
};
struct vlv_wm_state {
struct vlv_pipe_wm wm[NUM_VLV_WM_LEVELS];
struct vlv_sr_wm sr[NUM_VLV_WM_LEVELS];
uint8_t num_levels;
bool cxsr;
};
struct vlv_fifo_state {
u16 plane[I915_MAX_PLANES];
};
struct intel_crtc_wm_state {
union {
struct {
@ -509,6 +535,17 @@ struct intel_crtc_wm_state {
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
} skl;
struct {
/* "raw" watermarks (not inverted) */
struct vlv_pipe_wm raw[NUM_VLV_WM_LEVELS];
/* intermediate watermarks (inverted) */
struct vlv_wm_state intermediate;
/* optimal watermarks (inverted) */
struct vlv_wm_state optimal;
/* display FIFO split */
struct vlv_fifo_state fifo_state;
} vlv;
};
/*
@ -539,12 +576,19 @@ struct intel_crtc_state {
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
int pipe_src_w, pipe_src_h;
/*
* Pipe pixel rate, adjusted for
* panel fitter/pipe scaler downscaling.
*/
unsigned int pixel_rate;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@ -581,6 +625,14 @@ struct intel_crtc_state {
*/
bool dither;
/*
* Dither gets enabled for 18bpp which causes CRC mismatch errors for
* compliance video pattern tests.
* Disable dither only if it is a compliance test request for
* 18bpp.
*/
bool dither_force_disable;
/* Controls for the clock computation, to override various stages. */
bool clock_set;
@ -674,15 +726,9 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
};
struct vlv_wm_state {
struct vlv_pipe_wm wm[3];
struct vlv_sr_wm sr[3];
uint8_t num_active_planes;
uint8_t num_levels;
uint8_t level;
bool cxsr;
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
};
struct intel_crtc {
@ -698,7 +744,7 @@ struct intel_crtc {
bool active;
bool lowfreq_avail;
u8 plane_ids_mask;
unsigned long enabled_power_domains;
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
struct intel_flip_work *flip_work;
@ -730,10 +776,8 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
struct vlv_wm_state vlv;
} active;
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
int scanline_offset;
@ -747,27 +791,6 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
struct vlv_wm_state wm_state;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint32_t vert_pixels;
/*
* For packed pixel formats:
* bytes_per_pixel - holds bytes per pixel
* For planar pixel formats:
* bytes_per_pixel - holds bytes per pixel for uv-plane
* y_bytes_per_pixel - holds bytes per pixel for y-plane
*/
uint8_t bytes_per_pixel;
uint8_t y_bytes_per_pixel;
bool enabled;
bool scaled;
u64 tiling;
unsigned int rotation;
uint16_t fifo_size;
};
struct intel_plane {
@ -779,13 +802,6 @@ struct intel_plane {
int max_downscale;
uint32_t frontbuffer_bit;
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
struct intel_plane_wm_parameters wm;
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
@ -891,12 +907,17 @@ struct intel_dp_desc {
struct intel_dp_compliance_data {
unsigned long edid;
uint8_t video_pattern;
uint16_t hdisplay, vdisplay;
uint8_t bpc;
};
struct intel_dp_compliance {
unsigned long test_type;
struct intel_dp_compliance_data test_data;
bool test_active;
int test_link_rate;
u8 test_lane_count;
};
struct intel_dp {
@ -911,6 +932,7 @@ struct intel_dp {
bool has_audio;
bool detect_done;
bool channel_eq_status;
bool reset_link_params;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
@ -928,6 +950,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@ -990,7 +1013,6 @@ struct intel_dp {
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
bool desc_valid;
};
struct intel_digital_port {
@ -1003,6 +1025,7 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
};
struct intel_dp_mst_encoder {
@ -1097,7 +1120,19 @@ intel_attached_encoder(struct drm_connector *connector)
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_digital_port, base.base);
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
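/* fall through: "unknown" DDI encoders are digital ports too */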
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
return container_of(encoder, struct intel_digital_port,
base.base);
default:
return NULL;
}
}
static inline struct intel_dp_mst_encoder *
@ -1185,18 +1220,19 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
@ -1209,11 +1245,12 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
int clock);
unsigned int intel_fb_align_height(struct drm_device *dev,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
unsigned int intel_fb_align_height(struct drm_i915_private *dev_priv,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
@ -1231,12 +1268,24 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
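
A minimal sketch of the intended commit-time use of these helpers, assuming intel_cdclk_state_compare() returns true when two states match and that dev_priv->cdclk.hw tracks the current hardware state (as the ilk_get_aux_clock_divider() hunk above suggests):

static void sketch_commit_cdclk(struct drm_i915_private *dev_priv,
				struct intel_atomic_state *state)
{
	/* reprogram the hardware only when the computed state differs */
	if (!intel_cdclk_state_compare(&dev_priv->cdclk.hw,
				       &state->cdclk.actual))
		intel_set_cdclk(dev_priv, &state->cdclk.actual);
}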
/* intel_display.c */
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
extern const struct drm_plane_funcs intel_plane_funcs;
@ -1311,9 +1360,8 @@ struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
@ -1388,10 +1436,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
@ -1664,6 +1709,7 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
@ -1692,10 +1738,8 @@ static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
* too much noise. */
if (!atomic_read(&dev_priv->pm.wakeref_count))
DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
/**
@ -1783,6 +1827,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
@ -1791,7 +1836,6 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
static inline int intel_enable_rc6(void)
@ -1865,9 +1909,9 @@ intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
return to_intel_plane_state(plane_state);
}
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);


@ -80,7 +80,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
}
}
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@ -357,6 +357,110 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}
static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp, val;
/* Set the MIPI mode
* If MIPI_Mode is off, writes to the LP_Wake bit are not reflected.
* Power on the MIPI IO first and then program the IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(MIPI_CTRL(port));
I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
}
/* Put the IO into reset */
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
I915_WRITE(MIPI_CTRL(PORT_A), tmp);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(MIPI_CTRL(port));
tmp |= GLK_LP_WAKE;
I915_WRITE(MIPI_CTRL(port), tmp);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPIO port is powergated\n");
}
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not ON\n");
}
/* Get IO out of reset */
tmp = I915_READ(MIPI_CTRL(PORT_A));
I915_WRITE(MIPI_CTRL(PORT_A), tmp | GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(10, 15);
}
/* Enter ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS Not active */
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE,
GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS is still active\n");
/* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Enter Normal Mode */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~GLK_LP_WAKE;
I915_WRITE(MIPI_CTRL(port), tmp);
}
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
GLK_DATA_LANE_STOP_STATE, 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
AFE_LATCHOUT, 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
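
Each ULPS transition above is the same read-modify-write of MIPI_DEVICE_READY; a hypothetical helper (not in this patch) makes the pattern explicit:

static void glk_dsi_set_ulps_state(struct drm_i915_private *dev_priv,
				   enum port port, u32 state)
{
	u32 val = I915_READ(MIPI_DEVICE_READY(port));

	val &= ~ULPS_STATE_MASK;
	val |= state | DEVICE_READY;	/* e.g. ULPS_STATE_ENTER or _EXIT */
	I915_WRITE(MIPI_DEVICE_READY(port), val);
}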
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -366,32 +470,19 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
/* Exit Low power state in 4 steps*/
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
/* 1. Enable MIPI PHY transparent latch */
val = I915_READ(BXT_MIPI_PORT_CTRL(port));
I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* 2. Enter ULPS */
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* at least 2us - relaxed for hrtimer subsystem optimization */
usleep_range(10, 50);
/* 3. Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(1000, 1500);
/* Clear ULPS and set device ready */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
usleep_range(2000, 2500);
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
}
@ -442,8 +533,121 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
else if (IS_GEN9_LP(dev_priv))
else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
else if (IS_GEMINILAKE(dev_priv))
glk_dsi_device_ready(encoder);
}
static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
}
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
}
}
static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 tmp;
/* Put the IO into reset */
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
I915_WRITE(MIPI_CTRL(PORT_A), tmp);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_wait_for_register(dev_priv,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~GLK_MIPIIO_ENABLE;
I915_WRITE(MIPI_CTRL(port), tmp);
}
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
{
glk_dsi_enter_low_power_mode(encoder);
glk_dsi_disable_mipi_io(encoder);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/*
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_wait_for_register(dev_priv,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
val = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
}
static void intel_dsi_port_enable(struct intel_encoder *encoder)
@ -456,12 +660,21 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
if (IS_GEN9_LP(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
temp = I915_READ(MIPI_CTRL(port));
temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
BXT_PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(MIPI_CTRL(port), temp);
}
} else {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
I915_WRITE(VLV_CHICKEN_3, temp);
}
}
for_each_dsi_port(port, intel_dsi->ports) {
@ -509,37 +722,57 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
} else {
msleep(20); /* XXX */
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, TURN_ON, false, port);
msleep(100);
drm_panel_enable(intel_dsi->panel);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_enable(encoder);
}
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
return;
msleep(msec);
}
/*
* Panel enable/disable sequences from the VBT spec.
*
* Note the spec has AssertReset / DeassertReset swapped from their
* usual naming. We use the normal names to avoid confusion (so below
* they are swapped compared to the spec).
*
* Steps starting with MIPI refer to VBT sequences. Note that for v2
* VBTs several steps which gained their own VBT sequence in v3 are
* expected to be handled directly by the driver, by directly driving
* gpios for example.
*
* v2 video mode seq v3 video mode seq command mode seq
* - power on - MIPIPanelPowerOn - power on
* - wait t1+t2 - wait t1+t2
* - MIPIDeassertResetPin - MIPIDeassertResetPin - MIPIDeassertResetPin
* - io lines to lp-11 - io lines to lp-11 - io lines to lp-11
* - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds
* - MIPITearOn
* - MIPIDisplayOn
* - turn on DPI - turn on DPI - set pipe to dsr mode
* - MIPIDisplayOn - MIPIDisplayOn
* - wait t5 - wait t5
* - backlight on - MIPIBacklightOn - backlight on
* ... ... ... issue mem cmds ...
* - backlight off - MIPIBacklightOff - backlight off
* - wait t6 - wait t6
* - MIPIDisplayOff
* - turn off DPI - turn off DPI - disable pipe dsr mode
* - MIPITearOff
* - MIPIDisplayOff - MIPIDisplayOff
* - io lines to lp-00 - io lines to lp-00 - io lines to lp-00
* - MIPIAssertResetPin - MIPIAssertResetPin - MIPIAssertResetPin
* - wait t3 - wait t3
* - power off - MIPIPanelPowerOff - power off
* - wait t4 - wait t4
*/
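
As a sketch, the v3 video-mode enable column of the table above corresponds to this ordered list of the MIPI_SEQ_* ids used below (illustrative only; the driver interleaves the calls with register programming rather than walking a table):

static const enum mipi_seq v3_vid_mode_enable[] = {	/* hypothetical */
	MIPI_SEQ_POWER_ON,	/* MIPIPanelPowerOn */
	MIPI_SEQ_DEASSERT_RESET,/* MIPIDeassertResetPin */
	MIPI_SEQ_INIT_OTP,	/* MIPISendInitialDcsCmds */
	MIPI_SEQ_DISPLAY_ON,	/* MIPIDisplayOn, after DPI is turned on */
	MIPI_SEQ_BACKLIGHT_ON,	/* MIPIBacklightOn */
};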
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@ -548,6 +781,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
@ -558,13 +792,16 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_disable_dsi_pll(encoder);
intel_enable_dsi_pll(encoder, pipe_config);
intel_dsi_prepare(encoder, pipe_config);
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
val | MIPIO_RST_CTRL);
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
msleep(intel_dsi->panel_on_delay);
/* Power up DSI regulator */
I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
@ -575,17 +812,43 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
I915_WRITE(DSPCLK_GATE_D, val);
}
/* put device in ready state */
intel_dsi_prepare(encoder, pipe_config);
/* Power on, try both CRC pmic gpio and VBT */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
/* Deassert reset */
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
/* Put device in ready state (LP-11) */
intel_dsi_device_ready(encoder);
drm_panel_prepare(intel_dsi->panel);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
/* Send initialization commands in LP mode */
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
/* Enable port in pre-enable phase itself because as per hw team
* recommendation, port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
msleep(20); /* XXX */
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, TURN_ON, false, port);
intel_dsi_msleep(intel_dsi, 100);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
intel_dsi_port_enable(encoder);
}
intel_panel_enable_backlight(intel_dsi->attached_connector);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
@ -604,13 +867,30 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(intel_dsi->attached_connector);
/*
* Disable Device ready before the port shutdown in order
* to avoid split screen
*/
if (IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports)
I915_WRITE(MIPI_DEVICE_READY(port), 0);
}
/*
* According to the spec we should send SHUTDOWN before
* MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
* has shown that the v3 sequence works for v2 VBTs too
*/
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
for_each_dsi_port(port, intel_dsi->ports)
@ -619,86 +899,15 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder,
}
}
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
msleep(2);
}
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
intel_dsi_reset_clocks(encoder, port);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
temp &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
/* If disable packets are sent before the shutdown packet, then a "send
* turn on packet" error is observed during some subsequent enable sequence */
drm_panel_disable(intel_dsi->panel);
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
if (intel_wait_for_register(dev_priv,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
val = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
IS_BROXTON(dev_priv))
vlv_dsi_clear_device_ready(encoder);
else if (IS_GEMINILAKE(dev_priv))
glk_dsi_clear_device_ready(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@ -707,13 +916,43 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
DRM_DEBUG_KMS("\n");
intel_dsi_disable(encoder);
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
usleep_range(2000, 5000);
}
intel_dsi_unprepare(encoder);
/*
* If disable packets are sent before the shutdown packet, then a "send
* turn on packet" error is observed during some subsequent enable sequence
*/
if (is_cmd_mode(intel_dsi))
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
val & ~MIPIO_RST_CTRL);
}
intel_disable_dsi_pll(encoder);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@ -724,11 +963,12 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
I915_WRITE(DSPCLK_GATE_D, val);
}
drm_panel_unprepare(intel_dsi->panel);
/* Assert reset */
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
msleep(intel_dsi->panel_off_delay);
/* Panel Disable over CRC PMIC */
/* Power off, try both CRC pmic gpio and VBT */
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
intel_dsi_exec_vbt_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
@ -736,7 +976,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
msleep(intel_dsi->panel_pwr_cycle_delay);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@ -744,14 +984,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
/*
@ -807,7 +1046,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
out_put_power:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return active;
}
@ -1279,6 +1518,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*/
I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
I915_WRITE(MIPI_CLK_LANE_TIMING(port),
intel_dsi->dphy_reg);
}
/* the bw essential for transmitting 16 long packets containing
* 252 bytes meant for dcs write memory command is programmed in
* this register in terms of byte clocks. based on dsi transfer
@ -1302,6 +1549,30 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
}
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
if (!IS_GEMINILAKE(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
intel_dsi_reset_clocks(encoder, port);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
val = I915_READ(MIPI_DSI_FUNC_PRG(port));
val &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
}
}
}
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@ -1485,6 +1756,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_encoder->port = port;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
@ -1560,7 +1832,8 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
* In case of BYT with CRC PMIC, we need to use GPIO for
* Panel control.
*/
if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
(dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
intel_dsi->gpio_panel =
gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
@ -1571,6 +1844,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
}
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);


@ -130,6 +130,11 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config);


@ -192,6 +192,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
wait_for_dsi_fifo_empty(intel_dsi, port);
out:
data += len;
@ -424,10 +426,9 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
void intel_dsi_exec_vbt_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
@ -491,40 +492,6 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
}
}
static int vbt_panel_prepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
}
static int vbt_panel_unprepare(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
return 0;
}
static int vbt_panel_enable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
}
static int vbt_panel_get_modes(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
@ -548,10 +515,6 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
}
static const struct drm_panel_funcs vbt_panel_funcs = {
.disable = vbt_panel_disable,
.unprepare = vbt_panel_unprepare,
.prepare = vbt_panel_prepare,
.enable = vbt_panel_enable,
.get_modes = vbt_panel_get_modes,
};
@ -571,6 +534,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u32 mul;
u16 burst_mode_ratio;
enum port port;
@ -674,11 +638,6 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
break;
}
/*
* ui(s) = 1/f [f in hz]
* ui(ns) = 10^9 / (f*10^6) [f in Mhz] -> 10^3/f(Mhz)
*/
/* in Kbps */
ui_num = NS_KHZ_RATIO;
ui_den = bitrate;
@ -692,21 +651,26 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
*/
intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
/* count values in UI = (ns value) * (bitrate / (2 * 10^6))
/* DDR clock period = 2 * UI
* UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ)
* UI(nsec) = 10^6 / bitrate
* DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate
* DDR clock count = ns_value / DDR clock period
*
* Since txddrclkhs_i is 2xUI, all the count values programmed in
* DPHY param register are divided by 2
*
* prepare count
* For GEMINILAKE, dphy_param_reg is programmed in terms of the HS byte
* clock count; for other platforms, in terms of the HS ddr clock count
*/
mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* prepare count */
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
/* exit zero count */
exit_zero_cnt = DIV_ROUND_UP(
(ths_prepare_hszero - ths_prepare_ns) * ui_den,
ui_num * 2
ui_num * mul
);
/*
@ -720,12 +684,12 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
(tclk_prepare_clkzero - ths_prepare_ns)
* ui_den, 2 * ui_num);
(tclk_prepare_clkzero - ths_prepare_ns)
* ui_den, ui_num * mul);
/* trail count */
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX ||
exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
@ -801,6 +765,19 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
DRM_DEBUG_KMS("Video mode format %s\n",
intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
"non-burst with sync pulse" :
intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
"non-burst with sync events" :
intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
"burst" : "<unknown>");
DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");


@ -206,17 +206,24 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return false;
/*
* Both dividers must be programmed with valid values even if only one
* of the PLL is used, see BSpec/Broxton Clocks. Check this here for
Dividers must be programmed with valid values. As per BSpec, for
* GEMINILAKE only PORT A divider values are checked while for BXT
* both divider values are validated. Check this here for
* paranoia, since BIOS is known to misconfigure PLLs in this way at
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
val = I915_READ(BXT_DSI_PLL_CTL);
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
val);
enabled = false;
if (IS_GEMINILAKE(dev_priv)) {
if (!(val & BXT_DSIA_16X_MASK)) {
DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
return enabled;
@ -372,6 +379,53 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
ESCAPE_CLOCK_DIVIDER_SHIFT);
}
static void glk_dsi_program_esc_clock(struct drm_device *dev,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
u32 div1_value = 0;
u32 div2_value = 0;
u32 txesc1_div = 0;
u32 txesc2_div = 0;
pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
ddr_clk = dsi_rate / 2;
/* Variable divider value */
div1_value = DIV_ROUND_CLOSEST(ddr_clk, 20000);
/* Calculate TXESC1 divider */
if (div1_value <= 10)
txesc1_div = div1_value;
else if ((div1_value > 10) && (div1_value <= 20))
txesc1_div = DIV_ROUND_UP(div1_value, 2);
else if ((div1_value > 20) && (div1_value <= 30))
txesc1_div = DIV_ROUND_UP(div1_value, 4);
else if ((div1_value > 30) && (div1_value <= 40))
txesc1_div = DIV_ROUND_UP(div1_value, 6);
else if ((div1_value > 40) && (div1_value <= 50))
txesc1_div = DIV_ROUND_UP(div1_value, 8);
else
txesc1_div = 10;
/* Calculate TXESC2 divider */
div2_value = DIV_ROUND_UP(div1_value, txesc1_div);
if (div2_value < 10)
txesc2_div = div2_value;
else
txesc2_div = 10;
I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
}
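
A worked example of the divider cascade, with an illustrative pll_ratio of 34 (BXT_REF_CLOCK_KHZ is 19200):

dsi_rate   = (19200 * 34) / 2;			/* 326400 kHz */
ddr_clk    = 326400 / 2;			/* 163200 kHz */
div1_value = DIV_ROUND_CLOSEST(163200, 20000);	/* 8, <= 10, so txesc1_div = 8 */
div2_value = DIV_ROUND_UP(8, 8);		/* 1, < 10, so txesc2_div = 1 */
/* escape clock ends up at 163200 / (8 * 1) ~= 20.4 MHz */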
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
const struct intel_crtc_state *config)
@ -416,11 +470,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
/* As per bspec program the 8/3X clock divider to the below value */
if (dev_priv->vbt.dsi.config->is_cmd_mode)
mipi_8by3_divider = 0x2;
else
mipi_8by3_divider = 0x3;
mipi_8by3_divider = 0x2;
tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
@ -430,11 +480,12 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
@ -446,11 +497,20 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
* round 'up' the result
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
if (IS_BROXTON(dev_priv)) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
dsi_ratio_min = GLK_DSI_PLL_RATIO_MIN;
dsi_ratio_max = GLK_DSI_PLL_RATIO_MAX;
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
}
} else
DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@ -462,13 +522,13 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
if (dsi_ratio <= 50)
if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
return 0;
}
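
For example (numbers illustrative only): a dsi_clk of 405000 kHz gives

dsi_ratio = DIV_ROUND_UP(405000 * 2, BXT_REF_CLOCK_KHZ);	/* = 43 */
/* 43 falls inside the BXT ratio window; being <= 50, it also sets
 * BXT_DSI_PLL_PVD_RATIO_1 on BXT */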
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -483,8 +543,12 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
POSTING_READ(BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
for_each_dsi_port(port, intel_dsi->ports)
bxt_dsi_program_clocks(encoder->base.dev, port, config);
if (IS_BROXTON(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports)
bxt_dsi_program_clocks(encoder->base.dev, port, config);
} else {
glk_dsi_program_esc_clock(encoder->base.dev, config);
}
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
@ -522,7 +586,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
else if (IS_GEN9_LP(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return gen9lp_compute_dsi_pll(encoder, config);
return -ENODEV;
}
@ -535,7 +599,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
else if (IS_GEN9_LP(dev_priv))
bxt_enable_dsi_pll(encoder, config);
gen9lp_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
@ -548,19 +612,30 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
bxt_disable_dsi_pll(encoder);
}
static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
if (IS_BROXTON(dev_priv)) {
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
} else {
tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
}
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
@ -569,7 +644,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
gen9lp_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}


@ -515,6 +515,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
"DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);


@ -28,8 +28,8 @@
static const struct engine_info {
const char *name;
unsigned exec_id;
enum intel_engine_hw_id hw_id;
unsigned int exec_id;
unsigned int hw_id;
u32 mmio_base;
unsigned irq_shift;
int (*init_legacy)(struct intel_engine_cs *engine);
@ -110,21 +110,20 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
}
/**
* intel_engines_init() - allocate, populate and init the Engine Command Streamers
* intel_engines_init_early() - allocate the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int i;
int ret;
int err;
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
@ -134,20 +133,8 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
if (!HAS_ENGINE(dev_priv, i))
continue;
if (i915.enable_execlists)
init = intel_engines[i].init_execlists;
else
init = intel_engines[i].init_legacy;
if (!init)
continue;
ret = intel_engine_setup(dev_priv, i);
if (ret)
goto cleanup;
ret = init(dev_priv->engine[i]);
if (ret)
err = intel_engine_setup(dev_priv, i);
if (err)
goto cleanup;
mask |= ENGINE_MASK(i);
@ -166,14 +153,67 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
return 0;
cleanup:
for_each_engine(engine, dev_priv, id)
kfree(engine);
return err;
}
/**
* intel_engines_init() - allocate, populate and init the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
struct intel_engine_cs *engine;
enum intel_engine_id id, err_id;
unsigned int mask = 0;
int err = 0;
for_each_engine(engine, dev_priv, id) {
int (*init)(struct intel_engine_cs *engine);
if (i915.enable_execlists)
intel_logical_ring_cleanup(engine);
init = intel_engines[id].init_execlists;
else
intel_engine_cleanup(engine);
init = intel_engines[id].init_legacy;
if (!init) {
kfree(engine);
dev_priv->engine[id] = NULL;
continue;
}
err = init(engine);
if (err) {
err_id = id;
goto cleanup;
}
mask |= ENGINE_MASK(id);
}
return ret;
/*
* Catch failures to update the intel_engines table when new engines
* are added to the driver: warn and disable the forgotten engines.
*/
if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
device_info->ring_mask = mask;
device_info->num_rings = hweight32(mask);
return 0;
cleanup:
for_each_engine(engine, dev_priv, id) {
if (id >= err_id)
kfree(engine);
else
dev_priv->gt.cleanup_engine(engine);
}
return err;
}
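
A sketch of the intended two-phase call order during driver load (the surrounding i915_driver_load() plumbing is an assumption here, not quoted from this commit):

err = intel_engines_init_early(dev_priv);	/* allocate + set up structs */
if (err)
	return err;
/* ... mmio, ggtt etc. come up, i915.enable_execlists is settled ... */
err = intel_engines_init(dev_priv);		/* init_execlists or init_legacy */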
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
@ -212,8 +252,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
engine->irq_seqno_barrier(engine);
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
engine->timeline->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
/* After manually advancing the seqno, fake the interrupt in case
@ -482,3 +520,601 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
break;
}
}
static int wa_add(struct drm_i915_private *dev_priv,
i915_reg_t addr,
const u32 mask, const u32 val)
{
const u32 idx = dev_priv->workarounds.count;
if (WARN_ON(idx >= I915_MAX_WA_REGS))
return -ENOSPC;
dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;
dev_priv->workarounds.count++;
return 0;
}
#define WA_REG(addr, mask, val) do { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
} while (0)
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))
#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
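
For instance, WA_SET_BIT_MASKED() records a masked-bit write, so a single 32-bit value both selects and sets the bit; expanding one of the uses below by hand:

/* WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE) boils down to: */
wa_add(dev_priv, GEN8_ROW_CHICKEN,
       STALL_DOP_GATING_DISABLE,			/* mask */
       _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));	/* (bit << 16) | bit */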
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[engine->id]++;
return 0;
}
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
* polygons in the same 8x4 pixel/sample area to be processed without
* stalling waiting for the earlier ones to write to Hierarchical Z
* buffer."
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
return 0;
}
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_workarounds(engine);
if (ret)
return ret;
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in broadwell_init_clock_gating()
* to disable EUTC clock gating.
*/
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
WA_SET_BIT_MASKED(HDC_CHICKEN0,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_workarounds(engine);
if (ret)
return ret;
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
return 0;
}
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
* WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
* but we do that in per ctx batchbuffer as there is an issue
* with this register not getting restored on ctx restore
*/
}
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
/* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
* in some hsds for skl. We keep the tie for all gen9. The
* documentation is a bit hazy and so we want to get common behaviour,
* even though there is no clear evidence we would need both on kbl/bxt.
* This area has been source of system hangs so we play it safe
* and mimic the skl regardless of what bspec says.
*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
* a TLB invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
/* WaDisableHDCInvalidation:skl,bxt,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
BDW_DISABLE_HDC_INVALIDATION);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
for (i = 0; i < 3; i++) {
u8 ss;
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
continue;
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return 0;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
return 0;
}
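
A worked example with made-up fuse values: if subslice_7eu[0] == 0x4, exactly one subslice of slice 0 has 7 EUs, so:

ss      = ffs(0x4) - 1;	/* = 2 */
vals[0] = 3 - ss;	/* = 1, programmed as GEN9_IZ_HASHING(0, 1) */
/* slices whose subslice_7eu mask is not a power of two keep vals[i] == 0 */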
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/*
* Actual WA is to disable percontext preemption granularity control
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
/* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
return skl_tune_iz_hashing(engine);
}
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisablePooledEuLoadBalancingFix:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
/* WaDisableSbeCacheDispatchPortSharing:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
/* WaToEnableHwFixForPushConstHWBug:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaInPlaceDecompressionHang:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
return 0;
}
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaEnableGapsTsvCreditFix:kbl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
WA_SET_BIT(GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:kbl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaDisableLSQCROPERFforOCL:kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
return 0;
}
static int glk_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
return 0;
}
int init_workarounds_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int err;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
if (IS_BROADWELL(dev_priv))
err = bdw_init_workarounds(engine);
else if (IS_CHERRYVIEW(dev_priv))
err = chv_init_workarounds(engine);
else if (IS_SKYLAKE(dev_priv))
err = skl_init_workarounds(engine);
else if (IS_BROXTON(dev_priv))
err = bxt_init_workarounds(engine);
else if (IS_KABYLAKE(dev_priv))
err = kbl_init_workarounds(engine);
else if (IS_GEMINILAKE(dev_priv))
err = glk_init_workarounds(engine);
else
err = 0;
if (err)
return err;
DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
engine->name, dev_priv->workarounds.count);
return 0;
}
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
struct i915_workarounds *w = &req->i915->workarounds;
u32 *cs;
int ret, i;
if (w->count == 0)
return 0;
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
cs = intel_ring_begin(req, (w->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(w->count);
for (i = 0; i < w->count; i++) {
*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
*cs++ = w->reg[i].value;
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
return 0;
}
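
The emitted stream is a single LRI packet; for a two-entry workaround list the ring dwords would look roughly like this (offsets and values invented for illustration):

/*
 * MI_LOAD_REGISTER_IMM(2)	header, count = w->count
 * 0x0000e4f0			reg[0].addr (mmio offset)
 * 0x00010001			reg[0].value
 * 0x0000e184			reg[1].addr
 * 0x01000100			reg[1].value
 * MI_NOOP			pad to an even number of dwords
 */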
/**
* intel_engine_is_idle() - Report if the engine has finished processing all work
* @engine: the intel_engine_cs
*
* Return true if there are no requests pending, nothing left to be submitted
* to hardware, and the engine is idle.
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
/* Any inflight/incomplete requests? */
if (!i915_seqno_passed(intel_engine_get_seqno(engine),
intel_engine_last_submit(engine)))
return false;
/* Interrupt/tasklet pending? */
if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
return false;
/* Both ports drained, no more ELSP submission? */
if (engine->execlist_port[0].request)
return false;
/* Ring stopped? */
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
return false;
return true;
}
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
if (!intel_engine_is_idle(engine))
return false;
}
return true;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif


@ -537,8 +537,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
end = U64_MAX;
@ -628,7 +627,8 @@ err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
if (drm_mm_initialized(&dev_priv->mm.stolen))
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@ -743,8 +743,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
cache->plane.rotation = plane_state->base.rotation;
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
@ -819,7 +818,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
fbc->no_fbc_reason = "pixel rate is too big";
return false;
}
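For scale, a worked example with assumed numbers (not taken from this patch): at a 450 MHz CDCLK the workaround above permits pixel rates only below 95% of it.

	u32 cdclk_khz = 450000;				/* assumed CDCLK in kHz */
	u32 fbc_limit_khz = cdclk_khz * 95 / 100;	/* 427500 kHz cut-off */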

View file

@ -45,6 +45,14 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
struct drm_i915_gem_object *obj = ifbdev->fb->obj;
unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
intel_fb_obj_invalidate(obj, origin);
}
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@ -53,12 +61,8 @@ static int intel_fbdev_set_par(struct fb_info *info)
int ret;
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
if (ret == 0)
intel_fbdev_invalidate(ifbdev);
return ret;
}
@ -71,12 +75,8 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
int ret;
ret = drm_fb_helper_blank(blank, info);
if (ret == 0) {
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
if (ret == 0)
intel_fbdev_invalidate(ifbdev);
return ret;
}
@ -87,15 +87,11 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;
ret = drm_fb_helper_pan_display(var, info);
if (ret == 0) {
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
ret = drm_fb_helper_pan_display(var, info);
if (ret == 0)
intel_fbdev_invalidate(ifbdev);
return ret;
}
@ -121,7 +117,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *obj;
int size, ret;
/* we don't do packed 24bpp */
@ -136,14 +132,13 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
obj = NULL;
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
@ -151,24 +146,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
goto out;
goto err;
}
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb)) {
i915_gem_object_put(obj);
ret = PTR_ERR(fb);
goto out;
goto err_obj;
}
mutex_unlock(&dev->struct_mutex);
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
out:
mutex_unlock(&dev->struct_mutex);
err_obj:
i915_gem_object_put(obj);
err:
return ret;
}
@ -355,23 +348,23 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, mask;
unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
int pass = 0;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
memcpy(save_enabled, enabled, count);
mask = BIT(count) - 1;
mask = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@ -385,7 +378,7 @@ retry:
if (conn_configured & BIT(i))
continue;
if (pass == 0 && !connector->has_tile)
if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@ -496,10 +489,8 @@ retry:
conn_configured |= BIT(i);
}
if ((conn_configured & mask) != mask) {
pass++;
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
}
/*
* If the BIOS didn't enable everything it could, fall back to have the
@ -628,7 +619,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
cur_size = intel_fb_align_height(to_i915(dev), cur_size,
fb->base.format->format,
fb->base.modifier);
cur_size *= fb->base.pitches[0];
@ -838,11 +829,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
if (!ifbdev->fb)
return;
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
DRM_DEBUG("failed to restore crtc mode\n");
} else {
mutex_lock(&dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&dev->struct_mutex);
}
if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
intel_fbdev_invalidate(ifbdev);
}

View file

@ -54,7 +54,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
struct intel_crtc *crtc;
enum pipe pipe;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@ -72,7 +72,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@ -90,7 +90,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@ -98,6 +98,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
@ -109,7 +110,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if (enable) {
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
@ -139,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@ -147,6 +148,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
POSTING_READ(GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
@ -204,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@ -212,6 +214,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
@ -248,7 +251,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@ -368,9 +371,11 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
crtc->cpu_fifo_underrun_disabled)
return;
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
@ -388,9 +393,11 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder)
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false))
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
DRM_ERROR("PCH transcoder %s FIFO underrun\n",
transcoder_name(pch_transcoder));
}
}
/**

View file

@ -114,13 +114,12 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
}
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (retire) {
if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;

View file

@ -38,7 +38,6 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
@ -69,15 +68,12 @@ static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
* then any delayed flushes will be unblocked.
* completed and frontbuffer caching can be started again.
*/
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
@ -86,7 +82,7 @@ static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
if (!frontbuffer_bits)
return;
__intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
__intel_fb_obj_flush(obj, origin, frontbuffer_bits);
}
#endif /* __INTEL_FRONTBUFFER_H__ */
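With @retire removed, a retirement path reports itself through the origin instead; a minimal sketch, assuming ORIGIN_CS is the intended replacement for the old retire=true branch (the function name is illustrative):

static void retire_flush_sketch(struct drm_i915_gem_object *obj)
{
	/* ORIGIN_CS makes __intel_fb_obj_flush() filter the bits
	 * against busy_bits, exactly as retire=true used to.
	 */
	intel_fb_obj_flush(obj, ORIGIN_CS);
}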

View file

@ -520,10 +520,6 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_uc_fw_status_repr(guc_fw->fetch_status),
intel_uc_fw_status_repr(guc_fw->load_status));
intel_guc_auth_huc(dev_priv);
if (i915.enable_guc_submission) {
@ -536,6 +532,11 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
guc_interrupts_capture(dev_priv);
}
DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
i915.enable_guc_submission ? "submission enabled" : "loaded",
guc_fw->path,
guc_fw->major_ver_found, guc_fw->minor_ver_found);
return 0;
fail:
@ -713,12 +714,9 @@ fail:
DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
err, fw, uc_fw->obj);
mutex_lock(&dev_priv->drm.struct_mutex);
obj = uc_fw->obj;
obj = fetch_and_zero(&uc_fw->obj);
if (obj)
i915_gem_object_put(obj);
uc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
@ -792,16 +790,17 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
void intel_guc_fini(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct drm_i915_gem_object *obj;
mutex_lock(&dev_priv->drm.struct_mutex);
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
if (guc_fw->obj)
i915_gem_object_put(guc_fw->obj);
guc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
obj = fetch_and_zero(&guc_fw->obj);
if (obj)
i915_gem_object_put(obj);
guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
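Both uC teardown paths in this merge now use fetch_and_zero(); for reference, the i915 helper is approximately the macro below (quoted from memory, so treat the exact definition as an assumption):

#define fetch_and_zero(ptr) ({					\
	typeof(*ptr) __T = *(ptr);	/* read the old value */	\
	*(ptr) = (typeof(*ptr))0;	/* clear the slot */		\
	__T;				/* hand back the old value */	\
})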

View file

@ -480,3 +480,7 @@ void intel_hangcheck_init(struct drm_i915_private *i915)
INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_hangcheck.c"
#endif

View file

@ -902,12 +902,11 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
@ -927,7 +926,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@ -1869,14 +1868,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
intel_encoder->hpd_pin = HPD_PORT_A;
else
intel_encoder->hpd_pin = HPD_PORT_B;
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
@ -1988,6 +1980,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)

View file

@ -100,7 +100,6 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/**
@ -112,9 +111,13 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
* storms. Only the pin specific stats and state are changed, the caller is
* responsible for further action.
*
* @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
* otherwise it's considered an irq storm, and the irq state is set to
* @HPD_MARK_DISABLED.
* The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
* stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
* considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
*
* The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
* and should only be adjusted for automated hotplug testing.
*
* Return true if an irq storm was detected on @pin.
*/
@ -123,13 +126,15 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
{
unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int threshold = dev_priv->hotplug.hpd_storm_threshold;
bool storm = false;
if (!time_in_range(jiffies, start, end)) {
dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
dev_priv->hotplug.stats[pin].count = 0;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
} else if (dev_priv->hotplug.stats[pin].count > threshold &&
threshold) {
dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
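A simplified standalone model of the detection window described above (illustrative only; neither the struct nor the function exists in i915):

struct hpd_window {
	unsigned long start_ms;	/* start of the current 1000 ms window */
	int count;		/* interrupts seen inside the window */
};

static bool hpd_storm_model(struct hpd_window *w, unsigned long now_ms,
			    int threshold)
{
	if (now_ms - w->start_ms > 1000) {	/* window expired: restart */
		w->start_ms = now_ms;
		w->count = 0;
		return false;
	}
	/* threshold == 0 disables detection, matching the code above */
	return threshold && ++w->count > threshold;
}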
@ -152,7 +157,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
enum hpd_pin pin;
bool hpd_disabled = false;
assert_spin_locked(&dev_priv->irq_lock);
lockdep_assert_held(&dev_priv->irq_lock);
list_for_each_entry(connector, &mode_config->connector_list, head) {
if (connector->polled != DRM_CONNECTOR_POLL_HPD)
@ -219,7 +224,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
}
if (dev_priv->display.hpd_irq_setup)
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@ -425,7 +430,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
if (storm_detected)
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
@ -471,10 +476,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
}
static void i915_hpd_poll_init_work(struct work_struct *work)

View file

@ -181,12 +181,14 @@ void intel_huc_init(struct drm_i915_private *dev_priv)
}
huc_fw->path = fw_path;
if (huc_fw->path == NULL)
return;
huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
intel_uc_fw_fetch(dev_priv, huc_fw);
}
@ -274,12 +276,11 @@ fail:
void intel_huc_fini(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
struct drm_i915_gem_object *obj;
mutex_lock(&dev_priv->drm.struct_mutex);
if (huc_fw->obj)
i915_gem_object_put(huc_fw->obj);
huc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
obj = fetch_and_zero(&huc_fw->obj);
if (obj)
i915_gem_object_put(obj);
huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}

View file

@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{
if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
else if (IS_GEN9_BC(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
@ -89,7 +89,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
else if (IS_GEN9_BC(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);

Diff not shown because of its large size.

View file

@ -68,8 +68,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
int intel_engines_init(struct drm_i915_private *dev_priv);
/* Logical Ring Contexts */
/* One extra page is added before LRC for GuC as shared data */
@ -90,6 +88,5 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */

View file

@ -162,21 +162,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
unsigned long start = jiffies;
if (!lspcon->desc_valid)
return;
while (1) {
struct intel_dp_desc desc;
/*
* The w/a only applies in PCON mode and we don't expect any
* AUX errors.
*/
if (!__intel_dp_read_desc(intel_dp, &desc))
return;
if (intel_digital_port_connected(dev_priv, dig_port) &&
!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
if (intel_digital_port_connected(dev_priv, dig_port)) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
@ -253,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
lspcon->desc_valid = intel_dp_read_desc(dp);
intel_dp_read_desc(dp);
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;

View file

@ -91,12 +91,11 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
return false;
ret = false;
@ -114,7 +113,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
intel_display_power_put(dev_priv, power_domain);
intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@ -1066,6 +1065,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev_priv))

View file

@ -178,7 +178,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
@ -191,7 +191,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
"Platform that should have a MOCS table does not.\n");
}
/* WaDisableSkipCaching:skl,bxt,kbl */
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN9(dev_priv)) {
int i;
@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (ret)
return ret;
cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
for (index = 0; index < table->size; index++) {
intel_ring_emit_reg(ring, mocs_register(engine, index));
intel_ring_emit(ring, table->table[index].control_value);
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
*cs++ = table->table[index].control_value;
}
/*
@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_ring_emit_reg(ring, mocs_register(engine, index));
intel_ring_emit(ring, table->table[0].control_value);
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
*cs++ = table->table[0].control_value;
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ring *ring = req->ring;
unsigned int i;
int ret;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
if (ret)
return ret;
cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
for (i = 0; i < table->size/2; i++) {
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 2 * i, 0);
i++;
}
@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 0, 0));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 0, 0);
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
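Each GEN9_LNCFCMOCS register holds two table entries, hence the size/2 loop with an odd-tail fixup above. A worked case: for table->size = 5, the main loop writes pairs (0,1) and (2,3), the odd tail writes (4, 0), and the padding loop fills the remaining registers with (0,0). Assuming l3cc_combine() packs the two 16-bit halves low-first, a stand-in model is:

static u32 l3cc_pack_model(u16 low, u16 high)
{
	return (u32)low | (u32)high << 16;	/* low entry in bits 15:0 */
}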

View file

@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
int ret;
u32 *cs;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
overlay->active = true;
@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_I830(dev_priv))
i830_overlay_clock_gating(dev_priv, false);
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
*cs++ = overlay->flip_addr | OFC_UPDATE;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
u32 tmp, *cs;
WARN_ON(!overlay->active);
@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, vma);
@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
int ret;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
/* turn overlay off */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
intel_ring_advance(ring);
intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, NULL);
@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
u32 *cs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
struct intel_ring *ring;
req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
intel_ring_emit(ring,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);

View file

@ -1315,7 +1315,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_PINEVIEW(dev_priv))
clock = KHz(dev_priv->rawclk_freq);
else
clock = KHz(dev_priv->cdclk_freq);
clock = KHz(dev_priv->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@ -1333,7 +1333,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_G4X(dev_priv))
clock = KHz(dev_priv->rawclk_freq);
else
clock = KHz(dev_priv->cdclk_freq);
clock = KHz(dev_priv->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
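A worked example for the i965 formula above, with assumed numbers: a 19200 kHz rawclk and a 200 Hz backlight request give

	DIV_ROUND_CLOSEST(19200 * 1000, 200 * 128) = 750

as the programmed cycle count; the i9xx variant is identical apart from its divider of 32.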

View file

@ -80,7 +80,7 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
assert_spin_locked(&pipe_crc->lock);
lockdep_assert_held(&pipe_crc->lock);
return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
INTEL_PIPE_CRC_ENTRIES_NR);
}

Diff not shown because of its large size.

Diff not shown because of its large size.

View file

@ -5,6 +5,7 @@
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"
#define I915_CMD_HASH_ORDER 9
@ -144,6 +145,7 @@ struct intel_ring {
u32 head;
u32 tail;
int space;
int size;
int effective_size;
@ -184,26 +186,26 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_request;
struct intel_render_state;
/*
* Engine IDs definitions.
* Keep instances of the same type engine together.
*/
enum intel_engine_id {
RCS = 0,
BCS,
VCS,
VCS2,
#define _VCS(n) (VCS + (n))
VECS
};
struct intel_engine_cs {
struct drm_i915_private *i915;
const char *name;
enum intel_engine_id {
RCS = 0,
BCS,
VCS,
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
#define _VCS(n) (VCS + (n))
enum intel_engine_id id;
unsigned int exec_id;
enum intel_engine_hw_id {
RCS_HW = 0,
VCS_HW,
BCS_HW,
VECS_HW,
VCS2_HW
} hw_id;
enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
unsigned int hw_id;
unsigned int guc_id;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
@ -211,6 +213,11 @@ struct intel_engine_cs {
struct intel_render_state *render_state;
atomic_t irq_count;
unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@ -228,22 +235,22 @@ struct intel_engine_cs {
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
spinlock_t irq_lock; /* protects irq_*; irqsafe */
struct intel_wait *irq_wait; /* oldest waiter by retirement */
spinlock_t lock; /* protects the lists of requests; irqsafe */
spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
struct intel_wait *first_wait; /* oldest waiter by retirement */
struct task_struct *signaler; /* used for fence signalling */
struct drm_i915_gem_request *first_signal;
struct drm_i915_gem_request __rcu *first_signal;
struct timer_list fake_irq; /* used after a missed interrupt */
struct timer_list hangcheck; /* detect missed interrupts */
unsigned long timeout;
unsigned int hangcheck_interrupts;
bool irq_armed : 1;
bool irq_enabled : 1;
bool rpm_wakelock : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
/*
@ -285,7 +292,7 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
u32 *out);
u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
@ -368,7 +375,7 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
/* Execlists */
@ -376,13 +383,11 @@ struct intel_engine_cs {
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
GEM_DEBUG_DECL(u32 context_id);
} execlist_port[2];
struct rb_root execlist_queue;
struct rb_node *execlist_first;
unsigned int fw_domains;
bool disable_lite_restore_wa;
bool preempt_wa;
u32 ctx_desc_template;
/* Contexts are pinned whilst they are active on the GPU. The last
* context executed remains active whilst the GPU is idle - the
@ -492,21 +497,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
*(uint32_t *)(ring->vaddr + ring->tail) = data;
ring->tail += 4;
}
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_ring *ring)
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
/* Dummy function.
*
@ -516,16 +512,18 @@ static inline void intel_ring_advance(struct intel_ring *ring)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
static inline u32
intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - ring->vaddr;
return offset & (ring->size - 1);
u32 offset = addr - req->ring->vaddr;
GEM_BUG_ON(offset > req->ring->size);
return offset & (req->ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
@ -558,10 +556,11 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
* with serialising this hint with anything, so document it as
* a hint and nothing more.
*/
return READ_ONCE(engine->timeline->last_submitted_seqno);
return READ_ONCE(engine->timeline->seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@ -583,12 +582,51 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
static inline void intel_wait_init(struct intel_wait *wait,
struct drm_i915_gem_request *rq)
{
wait->tsk = current;
wait->request = rq;
}
static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
wait->tsk = current;
wait->seqno = seqno;
}
static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
return wait->seqno;
}
static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
wait->seqno = seqno;
return intel_wait_has_seqno(wait);
}
static inline bool
intel_wait_update_request(struct intel_wait *wait,
const struct drm_i915_gem_request *rq)
{
return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}
static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
return wait->seqno == seqno;
}
static inline bool
intel_wait_check_request(const struct intel_wait *wait,
const struct drm_i915_gem_request *rq)
{
return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
return RB_EMPTY_NODE(&wait->node);
@ -599,38 +637,36 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
return READ_ONCE(engine->breadcrumbs.irq_wait);
}
static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
bool wakeup = false;
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)
/* Note that for this not to dangerously chase a dangling pointer,
* we must hold the rcu_read_lock here.
*
* Also note that tsk is likely to be in !TASK_RUNNING state so an
* early test for tsk->state != TASK_RUNNING before wake_up_process()
* is unlikely to be beneficial.
*/
if (intel_engine_has_waiter(engine)) {
struct task_struct *tsk;
rcu_read_lock();
tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
if (tsk)
wakeup = wake_up_process(tsk);
rcu_read_unlock();
}
return wakeup;
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
batch[0] = GFX_OP_PIPE_CONTROL(6);
batch[1] = flags;
batch[2] = offset;
return batch + 6;
}
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_RINGBUFFER_H_ */
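These header changes complete the removal of intel_ring_emit(): intel_ring_begin() now returns a cursor into the ring (or an ERR_PTR), callers write dwords through it, and intel_ring_advance() only asserts that exactly the reserved space was filled. Every converted call site in this merge (overlay, MOCS, workarounds) follows the shape sketched below; the function itself is illustrative:

static int emit_two_noops(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);	/* reserve 2 dwords, get a cursor */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);	/* sanity-check the cursor position */
	return 0;
}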

View file

@ -49,19 +49,6 @@
* present for a given platform.
*/
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
((power_well) = &(power_domains)->power_wells[i]); \
i++) \
for_each_if ((power_well)->domains & (domain_mask))
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
for (i = (power_domains)->power_well_count - 1; \
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
i--) \
for_each_if ((power_well)->domains & (domain_mask))
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
@ -106,6 +93,16 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
return "PORT_DDI_B_IO";
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@ -198,19 +195,15 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
if (dev_priv->pm.suspended)
return false;
power_domains = &dev_priv->power_domains;
is_enabled = true;
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
if (power_well->always_on)
continue;
@ -385,124 +378,121 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
}
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
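All of these masks move from BIT() to BIT_ULL() because the power-domain count can now exceed 32: BIT(n) expands to (1UL << (n)), which is undefined behaviour for n >= 32 when long is 32 bits wide, while BIT_ULL(n) shifts 1ULL and stays valid up to bit 63. A minimal illustration:

u64 bad_mask  = BIT(40);	/* 1UL << 40: breaks on 32-bit builds */
u64 good_mask = BIT_ULL(40);	/* 1ULL << 40: always a valid 64-bit mask */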
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
@ -732,7 +722,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
* other request bits to be set, so WARN for those.
*/
if (power_well_id == SKL_DISP_PW_1 ||
((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
(IS_GEN9_BC(dev_priv) &&
power_well_id == SKL_DISP_PW_MISC_IO))
DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
"by DMC\n", power_well->name);
@ -847,14 +837,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
/*
* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now.
*/
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
/* Take over the request bit if set by BIOS. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_ENABLE_REQUEST))
I915_WRITE(HSW_PWR_WELL_DRIVER,
HSW_PWR_WELL_ENABLE_REQUEST);
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@ -881,10 +871,17 @@ static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
skl_set_power_well(dev_priv, power_well, power_well->count > 0);
uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
/* Clear any request made by BIOS as driver is taking over */
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
if (!(drv_req & mask))
I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
}
}
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
@ -917,16 +914,6 @@ static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->count > 0)
bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
else
bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
@ -964,10 +951,12 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct intel_cdclk_state cdclk_state = {};
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
WARN_ON(dev_priv->cdclk_freq !=
dev_priv->display.get_display_clock_speed(dev_priv));
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
gen9_assert_dbuf_enabled(dev_priv);
@ -987,13 +976,9 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
gen9_enable_dc5(dev_priv);
}
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->count > 0)
gen9_dc_off_power_well_enable(dev_priv, power_well);
else
gen9_dc_off_power_well_disable(dev_priv, power_well);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@ -1043,12 +1028,6 @@ out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@ -1249,7 +1228,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
int power_well_id)
@ -1659,14 +1638,6 @@ out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->id != PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@ -1693,9 +1664,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
for_each_power_well(i, power_well, BIT(domain), power_domains)
for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
@ -1779,7 +1749,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
power_domains = &dev_priv->power_domains;
@ -1790,7 +1759,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@ -1799,134 +1768,134 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
}
#define HSW_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BDW_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
.disable = i9xx_always_on_power_well_noop,
.is_enabled = i9xx_always_on_power_well_enabled,
};
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
.sync_hw = chv_pipe_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
};
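The per-platform .sync_hw hooks removed above all did the same thing, re-applying the software refcount to the hardware; with the initial on/off state now settled during intel_power_domains_init_hw() and the modeset HW state readout (see the updated kernel-doc further down), they collapse into the shared i9xx_power_well_sync_hw_noop. A standalone sketch, with all names invented, of sharing one no-op across several ops tables:

#include <stdio.h>

struct well;

struct well_ops {
	void (*sync_hw)(struct well *w);
	void (*enable)(struct well *w);
};

/* one shared no-op replaces several identical per-platform sync_hw hooks */
static void sync_hw_noop(struct well *w) { (void)w; }

static void wellA_enable(struct well *w) { (void)w; puts("A on"); }
static void wellB_enable(struct well *w) { (void)w; puts("B on"); }

static const struct well_ops wellA_ops = { sync_hw_noop, wellA_enable };
static const struct well_ops wellB_ops = { sync_hw_noop, wellB_enable };

int main(void)
{
	wellA_ops.sync_hw(NULL);	/* harmless no-op */
	wellB_ops.enable(NULL);
	return 0;
}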
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = chv_dpio_cmn_power_well_enable,
.disable = chv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
@ -1956,14 +1925,14 @@ static const struct i915_power_well_ops skl_power_well_ops = {
};
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
.sync_hw = gen9_dc_off_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = gen9_dc_off_power_well_enable,
.disable = gen9_dc_off_power_well_disable,
.is_enabled = gen9_dc_off_power_well_enabled,
};
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = bxt_dpio_cmn_power_well_enable,
.disable = bxt_dpio_cmn_power_well_disable,
.is_enabled = bxt_dpio_cmn_power_well_enabled,
@ -1998,21 +1967,21 @@ static struct i915_power_well bdw_power_wells[] = {
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_display_power_well_enable,
.disable = vlv_display_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_dpio_cmn_power_well_enable,
.disable = vlv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_power_well_enable,
.disable = vlv_power_well_disable,
.is_enabled = vlv_power_well_enabled,
@ -2155,26 +2124,26 @@ static struct i915_power_well skl_power_wells[] = {
.id = SKL_DISP_PW_2,
},
{
.name = "DDI A/E power well",
.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
.name = "DDI A/E IO power well",
.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B power well",
.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
.name = "DDI B IO power well",
.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
.name = "DDI C IO power well",
.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D power well",
.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
.name = "DDI D IO power well",
.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
@ -2287,20 +2256,20 @@ static struct i915_power_well glk_power_wells[] = {
.id = GLK_DISP_PW_AUX_C,
},
{
.name = "DDI A power well",
.domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
.name = "DDI A IO power well",
.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = GLK_DISP_PW_DDI_A,
},
{
.name = "DDI B power well",
.domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
.name = "DDI B IO power well",
.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C power well",
.domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
.name = "DDI C IO power well",
.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
@ -2323,7 +2292,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@ -2386,7 +2355,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
i915.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
@ -2398,7 +2367,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
} else if (IS_GEN9_BC(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
@ -2454,10 +2423,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
for_each_power_well(dev_priv, power_well) {
power_well->ops->sync_hw(dev_priv, power_well);
power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
power_well);
@ -2722,7 +2690,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().
* power wells belonging to the INIT power domain. Power wells in other
* domains (and not in the INIT domain) are referenced or disabled during the
* modeset state HW readout. After that the reference count of each power well
* must match its HW enabled state, see intel_power_domains_verify_state().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
@ -2730,7 +2701,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
power_domains->initializing = true;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
@ -2769,12 +2740,92 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
}
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
for_each_power_well(dev_priv, power_well) {
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%-25s %d\n",
power_well->name, power_well->count);
for_each_power_domain(domain, power_well->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
* @dev_priv: i915 device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
* called after modeset HW state sanitization, which is responsible for
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
bool dump_domain_info;
mutex_lock(&power_domains->lock);
dump_domain_info = false;
for_each_power_well(dev_priv, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
/*
* Power wells not belonging to any domain (like the MISC_IO
* and PW1 power wells) are under FW control, so ignore them,
* since their state can change asynchronously.
*/
if (!power_well->domains)
continue;
enabled = power_well->ops->is_enabled(dev_priv, power_well);
if ((power_well->count || power_well->always_on) != enabled)
DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
power_well->name, power_well->count, enabled);
domains_count = 0;
for_each_power_domain(domain, power_well->domains)
domains_count += power_domains->domain_use_count[domain];
if (power_well->count != domains_count) {
DRM_ERROR("power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
power_well->name, power_well->count,
domains_count);
dump_domain_info = true;
}
}
if (dump_domain_info) {
static bool dumped;
if (!dumped) {
intel_power_domains_dump_info(dev_priv);
dumped = true;
}
}
mutex_unlock(&power_domains->lock);
}
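The invariant checked above is twofold: a well must be enabled in hardware exactly when it is refcounted (or always-on), and its refcount must equal the sum of the use counts of the domains it serves. A standalone model of the check, with all names and values invented:

#include <stdbool.h>
#include <stdio.h>

#define NDOMAINS 3

struct well {
	const char *name;
	unsigned int count;	/* software refcount */
	bool always_on;
	bool hw_enabled;	/* what .is_enabled() would report */
	unsigned int domains;	/* bitmask over NDOMAINS */
};

static int verify(const struct well *w, const unsigned int *domain_use)
{
	unsigned int d, domains_count = 0;

	if ((w->count || w->always_on) != w->hw_enabled) {
		printf("%s: refcount/HW state mismatch\n", w->name);
		return -1;
	}
	for (d = 0; d < NDOMAINS; d++)
		if (w->domains & (1u << d))
			domains_count += domain_use[d];
	if (w->count != domains_count) {
		printf("%s: refcount %u != domains refcount %u\n",
		       w->name, w->count, domains_count);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int domain_use[NDOMAINS] = { 1, 0, 1 };
	struct well w = { "PW2", 2, false, true, 0x5 }; /* serves domains 0 and 2 */

	return verify(&w, domain_use) ? 1 : 0;
}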
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance


@ -2981,6 +2981,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_sdvo_enc_funcs, 0,


@ -60,8 +60,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
}
I915_WRITE(VLV_IOSF_ADDR, addr);
if (!is_read)
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (intel_wait_for_register(dev_priv,
@ -74,7 +73,6 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
if (is_read)
*val = I915_READ(VLV_IOSF_DATA);
I915_WRITE(VLV_IOSF_DATA, 0);
return 0;
}
@ -93,14 +91,18 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
return val;
}
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
int err;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->sb_lock);
return err;
}
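vlv_punit_write() used to return void, so a failed sideband transaction vanished silently; it now returns the error from vlv_sideband_rw() for the caller to act on. A standalone model of the change, where the busy flag, error value, and register address stand in for the real doorbell handshake:

#include <stdio.h>

/* stand-in for vlv_sideband_rw(): fails when the doorbell stays busy */
static int sideband_rw(int busy, unsigned int addr, unsigned int *val)
{
	(void)addr; (void)val;
	return busy ? -1 : 0;	/* the error code now reaches the caller */
}

static int punit_write(int busy, unsigned int addr, unsigned int val)
{
	return sideband_rw(busy, addr, &val);	/* was: return type void */
}

int main(void)
{
	int err = punit_write(1 /* simulate a stuck doorbell */,
			      0xd0 /* made-up address */, 0x42);

	if (err)
		printf("punit write failed: %d\n", err);
	return 0;
}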
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
@ -214,6 +216,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
}
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, 0);
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
@ -223,10 +226,15 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
if (intel_wait_for_register(dev_priv,
SBI_CTL_STAT,
SBI_BUSY | SBI_RESPONSE_FAIL,
SBI_BUSY,
0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
DRM_ERROR("timeout waiting for SBI to complete read\n");
return 0;
}
if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
DRM_ERROR("error during SBI read of reg %x\n", reg);
return 0;
}
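The SBI change splits the completion wait in two: first poll only for SBI_BUSY to clear (a timeout), then read the status once and check SBI_RESPONSE_FAIL separately, so a transaction that completes quickly but with an error is reported as a failure rather than folded into the timeout path. A standalone model of the pattern, with invented bit values:

#include <stdio.h>

#define BUSY		(1u << 0)
#define RESPONSE_FAIL	(1u << 1)

static unsigned int status;	/* stand-in for the SBI status register */

static int wait_not_busy(int tries)
{
	while (tries--) {
		if (!(status & BUSY))
			return 0;
	}
	return -1;	/* timeout: the hardware never completed */
}

int main(void)
{
	status = RESPONSE_FAIL;	/* completed promptly, but with an error */

	if (wait_not_busy(100)) {
		printf("timeout\n");	/* only a genuine hang lands here */
		return 1;
	}
	if (status & RESPONSE_FAIL) {
		printf("transaction failed\n");	/* now reported distinctly */
		return 1;
	}
	printf("ok\n");
	return 0;
}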
@ -258,10 +266,16 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
if (intel_wait_for_register(dev_priv,
SBI_CTL_STAT,
SBI_BUSY | SBI_RESPONSE_FAIL,
SBI_BUSY,
0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
DRM_ERROR("timeout waiting for SBI to complete write\n");
return;
}
if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
DRM_ERROR("error during SBI write of %x to reg %x\n",
value, reg);
return;
}
}


@ -219,13 +219,22 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
plane_ctl = PLANE_CTL_ENABLE;
if (IS_GEMINILAKE(dev_priv)) {
I915_WRITE(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
PLANE_COLOR_PLANE_GAMMA_DISABLE);
} else {
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
PLANE_CTL_PLANE_GAMMA_DISABLE;
}
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
if (key->flags) {


@ -1621,6 +1621,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;


@ -132,6 +132,13 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
}
}
static void
vgpu_fw_domains_nop(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
/* The guest driver doesn't need to take care of forcewake. */
}
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
@ -499,7 +506,7 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
assert_spin_locked(&dev_priv->uncore.lock);
lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_get)
return;
@ -557,7 +564,7 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
assert_spin_locked(&dev_priv->uncore.lock);
lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_put)
return;
@ -635,33 +642,6 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
return entry->domains;
}
static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
const struct intel_forcewake_range *ranges;
unsigned int num_ranges;
s32 prev;
unsigned int i;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
return;
ranges = dev_priv->uncore.fw_domains_table;
if (!ranges)
return;
num_ranges = dev_priv->uncore.fw_domains_table_entries;
for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
WARN_ON_ONCE(IS_GEN9(dev_priv) &&
(prev + 1) != (s32)ranges->start);
WARN_ON_ONCE(prev >= (s32)ranges->start);
prev = ranges->start;
WARN_ON_ONCE(prev >= (s32)ranges->end);
prev = ranges->end;
}
}
#define GEN_FW_RANGE(s, e, d) \
{ .start = (s), .end = (e), .domains = (d) }
@ -700,23 +680,6 @@ static const i915_reg_t gen8_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
static void intel_shadow_table_check(void)
{
const i915_reg_t *reg = gen8_shadowed_regs;
s32 prev;
u32 offset;
unsigned int i;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
return;
for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
offset = i915_mmio_reg_offset(*reg);
WARN_ON_ONCE(prev >= (s32)offset);
prev = offset;
}
}
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@ -985,29 +948,19 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
___force_wake_auto(dev_priv, fw_domains);
}
#define __gen6_read(x) \
#define __gen_read(func, x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
fw_engine = __gen6_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
fw_engine = __fwtable_reg_read_fw_domains(offset); \
fw_engine = __##func##_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
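__gen6_read and __fwtable_read differed only in which *_reg_read_fw_domains() lookup they called, so they collapse into one __gen_read(func, x) generator that pastes the prefix into both the function name and the helper; __gen_write below gets the same treatment. A standalone sketch of the token-pasting idea, with stand-in lookup helpers:

#include <stdio.h>

static unsigned int __gen6_lookup(unsigned int reg)    { return reg & 0xf; }
static unsigned int __fwtable_lookup(unsigned int reg) { return reg & 0xff; }

/* one generator stamps out a family of readers, pasting "func" into
 * both the reader's name and the helper it calls */
#define __gen_read(func, x) \
static unsigned int func##_read##x(unsigned int reg) \
{ \
	return __##func##_lookup(reg) + (x); \
}

__gen_read(gen6, 32)
__gen_read(fwtable, 32)

int main(void)
{
	printf("%u %u\n", gen6_read32(0x123), fwtable_read32(0x123));
	return 0;
}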
#define __gen9_decoupled_read(x) \
static u##x \
@ -1045,34 +998,6 @@ __gen6_read(64)
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define VGPU_READ_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
VGPU_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
VGPU_READ_FOOTER; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
@ -1136,29 +1061,19 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) \
#define __gen_write(func, x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
fw_engine = __gen8_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
fw_engine = __fwtable_reg_write_fw_domains(offset); \
fw_engine = __##func##_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen9_decoupled_write(x) \
static void \
@ -1195,31 +1110,6 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define VGPU_WRITE_HEADER \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_device_not_suspended(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
#define VGPU_WRITE_FOOTER \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg, u##x val, bool trace) { \
VGPU_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
VGPU_WRITE_FOOTER; \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
@ -1375,6 +1265,11 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE, FORCEWAKE_ACK);
}
if (intel_vgpu_active(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = vgpu_fw_domains_nop;
dev_priv->uncore.funcs.force_wake_put = vgpu_fw_domains_nop;
}
/* All future platforms are expected to require complex power gating */
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
@ -1445,15 +1340,6 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
break;
}
intel_fw_table_check(dev_priv);
if (INTEL_GEN(dev_priv) >= 8)
intel_shadow_table_check();
if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
@ -1971,3 +1857,7 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
return fw_domains;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_uncore.c"
#endif


@ -0,0 +1,135 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "huge_gem_object.h"
static void huge_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
unsigned long nreal = obj->scratch / PAGE_SIZE;
struct scatterlist *sg;
for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
__free_page(sg_page(sg));
sg_free_table(pages);
kfree(pages);
}
static struct sg_table *
huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct scatterlist *sg, *src, *end;
struct sg_table *pages;
unsigned long n;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
return ERR_PTR(-ENOMEM);
if (sg_alloc_table(pages, npages, GFP)) {
kfree(pages);
return ERR_PTR(-ENOMEM);
}
sg = pages->sgl;
for (n = 0; n < nreal; n++) {
struct page *page;
page = alloc_page(GFP | __GFP_HIGHMEM);
if (!page) {
sg_mark_end(sg);
goto err;
}
sg_set_page(sg, page, PAGE_SIZE, 0);
sg = __sg_next(sg);
}
if (nreal < npages) {
for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
src = __sg_next(src);
if (src == end)
src = pages->sgl;
}
}
if (i915_gem_gtt_prepare_pages(obj, pages))
goto err;
return pages;
err:
huge_free_pages(obj, pages);
return ERR_PTR(-ENOMEM);
#undef GFP
}
static void huge_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
i915_gem_gtt_finish_pages(obj, pages);
huge_free_pages(obj, pages);
obj->mm.dirty = false;
}
static const struct drm_i915_gem_object_ops huge_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,
};
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size)
{
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!phys_size || phys_size > dma_size);
GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(i915);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->scratch = phys_size;
return obj;
}
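huge_gem_object() backs an object of dma_size bytes with only phys_size bytes of real memory: huge_get_pages() allocates nreal pages and then reuses them round-robin for the remaining scatterlist entries, so dma page n aliases real page n % nreal. A quick standalone model of the aliasing, with small made-up counts:

#include <stdio.h>

int main(void)
{
	const unsigned long nreal = 4;		/* real pages backing the object */
	const unsigned long npages = 10;	/* pages the GTT sees */
	unsigned long n;

	for (n = 0; n < npages; n++)
		printf("dma page %2lu -> real page %lu\n", n, n % nreal);
	return 0;
}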


@ -0,0 +1,45 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __HUGE_GEM_OBJECT_H
#define __HUGE_GEM_OBJECT_H
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size);
static inline phys_addr_t
huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
{
return obj->scratch;
}
static inline dma_addr_t
huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
{
return obj->base.size;
}
#endif /* !__HUGE_GEM_OBJECT_H */


@ -0,0 +1,385 @@
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
#include "i915_random.h"
static int cpu_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
unsigned int needs_clflush;
struct page *page;
typeof(v) *map;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (err)
return err;
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
if (needs_clflush & CLFLUSH_BEFORE)
clflush(map+offset_in_page(offset) / sizeof(*map));
map[offset_in_page(offset) / sizeof(*map)] = v;
if (needs_clflush & CLFLUSH_AFTER)
clflush(map+offset_in_page(offset) / sizeof(*map));
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
return 0;
}
static int cpu_get(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 *v)
{
unsigned int needs_clflush;
struct page *page;
typeof(v) map;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (err)
return err;
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
if (needs_clflush & CLFLUSH_BEFORE)
clflush(map+offset_in_page(offset) / sizeof(*map));
*v = map[offset_in_page(offset) / sizeof(*map)];
kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
return 0;
}
static int gtt_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
struct i915_vma *vma;
typeof(v) *map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (err)
return err;
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
i915_vma_unpin_iomap(vma);
return 0;
}
static int gtt_get(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 *v)
{
struct i915_vma *vma;
typeof(v) map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
return err;
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
i915_vma_unpin_iomap(vma);
return 0;
}
static int wc_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
typeof(v) *map;
int err;
/* XXX a GTT write followed by a WC write goes missing */
i915_gem_object_flush_gtt_write_domain(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (err)
return err;
map = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
i915_gem_object_unpin_map(obj);
return 0;
}
static int wc_get(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 *v)
{
typeof(v) map;
int err;
/* XXX a WC write followed by a GTT write goes missing */
i915_gem_object_flush_gtt_write_domain(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
return err;
map = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
i915_gem_object_unpin_map(obj);
return 0;
}
static int gpu_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_i915_gem_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (err)
return err;
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
__i915_add_request(rq, false);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
if (INTEL_GEN(i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
} else {
*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq, cs);
i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
__i915_add_request(rq, true);
return 0;
}
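gpu_set() has to emit MI_STORE_DWORD_IMM in three shapes: gen8+ takes a 64-bit address, gen4-7 a 32-bit address preceded by an unused slot, and older parts a 3-dword form padded with MI_NOOP, so each variant fills exactly the four dwords reserved by intel_ring_begin(rq, 4). A standalone sketch of just the layout logic; the opcode values are placeholders, not the real encodings:

#include <stdint.h>
#include <stdio.h>

enum { OP_STORE = 0x1000, OP_STORE_GEN4 = 0x2000, OP_NOOP = 0 }; /* placeholders */

static int emit_store(uint32_t *cs, int gen, uint64_t addr, uint32_t v)
{
	int n = 0;

	if (gen >= 8) {
		cs[n++] = OP_STORE_GEN4;
		cs[n++] = (uint32_t)addr;		/* lower 32 bits */
		cs[n++] = (uint32_t)(addr >> 32);	/* upper 32 bits */
		cs[n++] = v;
	} else if (gen >= 4) {
		cs[n++] = OP_STORE_GEN4;
		cs[n++] = 0;				/* unused slot */
		cs[n++] = (uint32_t)addr;
		cs[n++] = v;
	} else {
		cs[n++] = OP_STORE;
		cs[n++] = (uint32_t)addr;
		cs[n++] = v;
		cs[n++] = OP_NOOP;			/* pad out the reservation */
	}
	return n;	/* always 4 dwords */
}

int main(void)
{
	uint32_t cs[4];

	printf("emitted %d dwords\n", emit_store(cs, 8, 0x100000000ull, 0xdead));
	return 0;
}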
static bool always_valid(struct drm_i915_private *i915)
{
return true;
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
return igt_can_mi_store_dword_imm(i915);
}
static const struct igt_coherency_mode {
const char *name;
int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
bool (*valid)(struct drm_i915_private *i915);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
{ "gtt", gtt_set, gtt_get, always_valid },
{ "wc", wc_set, wc_get, always_valid },
{ "gpu", gpu_set, NULL, needs_mi_store_dword },
{ },
};
static int igt_gem_coherency(void *arg)
{
const unsigned int ncachelines = PAGE_SIZE/64;
I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
/* We repeatedly write, overwrite and read from a sequence of
* cachelines in order to try and detect incoherency (unflushed writes
* from either the CPU or GPU). Each setter/getter uses our cache
* domain API which should prevent incoherency.
*/
offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
if (!offsets)
return -ENOMEM;
for (count = 0; count < ncachelines; count++)
offsets[count] = count * 64 + 4 * (count % 16);
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
if (!over->valid(i915))
continue;
for (write = igt_coherency_mode; write->name; write++) {
if (!write->set)
continue;
if (!write->valid(i915))
continue;
for (read = igt_coherency_mode; read->name; read++) {
if (!read->get)
continue;
if (!read->valid(i915))
continue;
for_each_prime_number_from(count, 1, ncachelines) {
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto unlock;
}
i915_random_reorder(offsets, ncachelines, &prng);
for (n = 0; n < count; n++)
values[n] = prandom_u32_state(&prng);
for (n = 0; n < count; n++) {
err = over->set(obj, offsets[n], ~values[n]);
if (err) {
pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
n, count, over->name, err);
goto put_object;
}
}
for (n = 0; n < count; n++) {
err = write->set(obj, offsets[n], values[n]);
if (err) {
pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
n, count, write->name, err);
goto put_object;
}
}
for (n = 0; n < count; n++) {
u32 found;
err = read->get(obj, offsets[n], &found);
if (err) {
pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
n, count, read->name, err);
goto put_object;
}
if (found != values[n]) {
pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
n, count, over->name,
write->name, values[n],
read->name, found,
~values[n], offsets[n]);
err = -EINVAL;
goto put_object;
}
}
__i915_gem_object_release_unless_active(obj);
}
}
}
}
unlock:
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
put_object:
__i915_gem_object_release_unless_active(obj);
goto unlock;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_gem_coherency),
};
return i915_subtests(tests, i915);
}
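The offsets table in igt_gem_coherency() places one u32 in each of the page's 64-byte cachelines, rotating through the 16 dword slots so neighbouring cachelines are probed at different intra-line positions; i915_random_reorder() then shuffles the visiting order. The first few entries, computed standalone with the same formula:

#include <stdio.h>

int main(void)
{
	unsigned int count;

	/* cacheline base + rotating dword slot, as in the selftest */
	for (count = 0; count < 8; count++)
		printf("offsets[%u] = %u\n",
		       count, count * 64 + 4 * (count % 16));
	return 0;
}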


@ -0,0 +1,459 @@
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "../i915_selftest.h"
#include "mock_drm.h"
#include "huge_gem_object.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(vma->vm->i915);
unsigned long n, size;
u32 *cmd;
int err;
GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
size = (4 * count + 1) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
obj = i915_gem_object_create_internal(vma->vm->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
}
GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
offset += vma->node.start;
for (n = 0; n < count; n++) {
if (gen >= 8) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = value;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
(gen < 6 ? 1 << 22 : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = value;
} else {
*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
*cmd++ = offset;
*cmd++ = value;
}
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
goto err;
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;
return vma;
err:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}
static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
static int gpu_fill(struct drm_i915_gem_object *obj,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
unsigned int dw)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
struct drm_i915_gem_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
unsigned int flags;
int err;
GEM_BUG_ON(obj->base.size > vm->total);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (err)
return err;
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
/* Within the GTT the huge object maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
batch = gpu_fill_dw(vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
dw);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_vma;
}
rq = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
err = engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto err_request;
err = i915_switch_context(rq);
if (err)
goto err_request;
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
flags);
if (err)
goto err_request;
i915_vma_move_to_active(batch, rq, 0);
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_move_to_active(vma, rq, 0);
i915_vma_unpin(vma);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
__i915_add_request(rq, true);
return 0;
err_request:
__i915_add_request(rq, false);
err_batch:
i915_vma_unpin(batch);
err_vma:
i915_vma_unpin(vma);
return err;
}
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
unsigned int n, m, need_flush;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
if (err)
return err;
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
map = kmap_atomic(i915_gem_object_get_page(obj, n));
for (m = 0; m < DW_PER_PAGE; m++)
map[m] = value;
if (!has_llc)
drm_clflush_virt_range(map, PAGE_SIZE);
kunmap_atomic(map);
}
i915_gem_obj_finish_shmem_access(obj);
obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
obj->base.write_domain = 0;
return 0;
}
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
if (err)
return err;
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
map = kmap_atomic(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(map, PAGE_SIZE);
for (m = 0; m < max; m++) {
if (map[m] != m) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], m);
err = -EINVAL;
goto out_unmap;
}
}
for (; m < DW_PER_PAGE; m++) {
if (map[m] != 0xdeadbeef) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], 0xdeadbeef);
err = -EINVAL;
goto out_unmap;
}
}
out_unmap:
kunmap_atomic(map);
if (err)
break;
}
i915_gem_obj_finish_shmem_access(obj);
return err;
}
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
struct drm_file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
u64 size;
u32 handle;
int err;
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
/* tie the handle to the drm_file for easy reaping */
err = drm_gem_handle_create(file, &obj->base, &handle);
i915_gem_object_put(obj);
if (err)
return ERR_PTR(err);
err = cpu_fill(obj, 0xdeadbeef);
if (err) {
pr_err("Failed to fill object with cpu, err=%d\n",
err);
return ERR_PTR(err);
}
list_add_tail(&obj->st_link, objects);
return obj;
}
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
unsigned long npages = fake_page_count(obj);
GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
return npages / DW_PER_PAGE;
}
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_file *file = mock_file(i915);
struct drm_i915_gem_object *obj;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
unsigned long ncontexts, ndwords, dw;
bool first_shared_gtt = true;
int err;
/* Create a few different contexts (with different mm) and write
* through each ctx/mm using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
mutex_lock(&i915->drm.struct_mutex);
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int id;
if (first_shared_gtt) {
ctx = __create_hw_context(i915, file->driver_priv);
first_shared_gtt = false;
} else {
ctx = i915_gem_create_context(i915, file->driver_priv);
}
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
}
for_each_engine(engine, i915, id) {
if (dw == 0) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_unlock;
}
}
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
goto out_unlock;
}
if (++dw == max_dwords(obj))
dw = 0;
ndwords++;
}
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
err = cpu_check(obj, rem);
if (err)
break;
dw += rem;
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
int err;
err = i915_gem_init_aliasing_ppgtt(i915);
if (err)
return err;
list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
if (IS_ERR(vma))
continue;
vma->flags &= ~I915_VMA_LOCAL_BIND;
}
return 0;
}
static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
i915_gem_fini_aliasing_ppgtt(i915);
}
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_ctx_exec),
};
bool fake_alias = false;
int err;
/* Install a fake aliasing gtt for exercise */
if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
mutex_lock(&dev_priv->drm.struct_mutex);
err = fake_aliasing_ppgtt_enable(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (err)
return err;
GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
fake_alias = true;
}
err = i915_subtests(tests, dev_priv);
if (fake_alias) {
mutex_lock(&dev_priv->drm.struct_mutex);
fake_aliasing_ppgtt_disable(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
return err;
}
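create_test_object() picks its sizes off DW_PER_PAGE = PAGE_SIZE / sizeof(u32), which is 1024 with 4K pages: the backing store is 1024 real pages (4 MiB), and the GTT-visible size is capped at 1024 aliased views of them (4 GiB), one view per dword slot in a page, which is what lets gpu_fill() write the nth dword of every real page through the nth alias. Checking the arithmetic standalone, with a 4K page size assumed:

#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 4096;
	const unsigned long long dw_per_page = page_size / 4;		  /* 1024 */
	const unsigned long long phys = dw_per_page * page_size;	  /* real backing */
	const unsigned long long dma = 1024ull * dw_per_page * page_size; /* size cap */

	printf("real pages: %llu (%llu MiB)\n", phys / page_size, phys >> 20);
	printf("fake pages: %llu (%llu GiB max)\n", dma / page_size, dma >> 30);
	printf("max dwords per object: %llu\n",
	       (dma / page_size) / dw_per_page);
	return 0;
}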


@ -0,0 +1,303 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "../i915_selftest.h"
#include "mock_gem_device.h"
#include "mock_dmabuf.h"
static int igt_dmabuf_export(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
obj = i915_gem_object_create(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
return PTR_ERR(dmabuf);
}
dma_buf_put(dmabuf);
return 0;
}
static int igt_dmabuf_import_self(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct drm_gem_object *import;
struct dma_buf *dmabuf;
int err;
obj = i915_gem_object_create(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
err = PTR_ERR(dmabuf);
goto out;
}
import = i915_gem_prime_import(&i915->drm, dmabuf);
if (IS_ERR(import)) {
pr_err("i915_gem_prime_import failed with err=%d\n",
(int)PTR_ERR(import));
err = PTR_ERR(import);
goto out_dmabuf;
}
if (import != &obj->base) {
pr_err("i915_gem_prime_import created a new object!\n");
err = -EINVAL;
goto out_import;
}
err = 0;
out_import:
i915_gem_object_put(to_intel_bo(import));
out_dmabuf:
dma_buf_put(dmabuf);
out:
i915_gem_object_put(obj);
return err;
}
static int igt_dmabuf_import(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
dmabuf = mock_dmabuf(1);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
pr_err("i915_gem_prime_import failed with err=%d\n",
(int)PTR_ERR(obj));
err = PTR_ERR(obj);
goto out_dmabuf;
}
if (obj->base.dev != &i915->drm) {
pr_err("i915_gem_prime_import created a non-i915 object!\n");
err = -EINVAL;
goto out_obj;
}
if (obj->base.size != PAGE_SIZE) {
pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
(long long)obj->base.size, PAGE_SIZE);
err = -EINVAL;
goto out_obj;
}
dma_map = dma_buf_vmap(dmabuf);
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
goto out_obj;
}
if (0) { /* Can not yet map dmabuf */
obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(obj_map)) {
err = PTR_ERR(obj_map);
pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
goto out_dma_map;
}
for (i = 0; i < ARRAY_SIZE(pattern); i++) {
memset(dma_map, pattern[i], PAGE_SIZE);
if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
err = -EINVAL;
pr_err("imported vmap not all set to %x!\n", pattern[i]);
i915_gem_object_unpin_map(obj);
goto out_dma_map;
}
}
for (i = 0; i < ARRAY_SIZE(pattern); i++) {
memset(obj_map, pattern[i], PAGE_SIZE);
if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
err = -EINVAL;
pr_err("exported vmap not all set to %x!\n", pattern[i]);
i915_gem_object_unpin_map(obj);
goto out_dma_map;
}
}
i915_gem_object_unpin_map(obj);
}
err = 0;
out_dma_map:
dma_buf_vunmap(dmabuf, dma_map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
dma_buf_put(dmabuf);
return err;
}
static int igt_dmabuf_import_ownership(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *ptr;
int err;
dmabuf = mock_dmabuf(1);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
ptr = dma_buf_vmap(dmabuf);
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
goto err_dmabuf;
}
memset(ptr, 0xc5, PAGE_SIZE);
dma_buf_vunmap(dmabuf, ptr);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
pr_err("i915_gem_prime_import failed with err=%d\n",
(int)PTR_ERR(obj));
err = PTR_ERR(obj);
goto err_dmabuf;
}
dma_buf_put(dmabuf);
err = i915_gem_object_pin_pages(obj);
if (err) {
pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
goto out_obj;
}
err = 0;
i915_gem_object_unpin_pages(obj);
out_obj:
i915_gem_object_put(obj);
return err;
err_dmabuf:
dma_buf_put(dmabuf);
return err;
}
static int igt_dmabuf_export_vmap(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *ptr;
int err;
obj = i915_gem_object_create(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
err = PTR_ERR(dmabuf);
goto err_obj;
}
i915_gem_object_put(obj);
ptr = dma_buf_vmap(dmabuf);
if (IS_ERR(ptr)) {
err = PTR_ERR(ptr);
pr_err("dma_buf_vmap failed with err=%d\n", err);
goto out;
}
if (memchr_inv(ptr, 0, dmabuf->size)) {
pr_err("Exported object not initialiased to zero!\n");
err = -EINVAL;
goto out;
}
memset(ptr, 0xc5, dmabuf->size);
err = 0;
dma_buf_vunmap(dmabuf, ptr);
out:
dma_buf_put(dmabuf);
return err;
err_obj:
i915_gem_object_put(obj);
return err;
}
int i915_gem_dmabuf_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_dmabuf_export),
SUBTEST(igt_dmabuf_import_self),
SUBTEST(igt_dmabuf_import),
SUBTEST(igt_dmabuf_import_ownership),
SUBTEST(igt_dmabuf_export_vmap),
};
struct drm_i915_private *i915;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
err = i915_subtests(tests, i915);
drm_dev_unref(&i915->drm);
return err;
}
int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_dmabuf_export),
};
return i915_subtests(tests, i915);
}
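The same test bodies are reused in two harnesses: the mock run builds a fake device with mock_gem_device() so it needs no hardware, while the live run gets the real device. A standalone model of the subtest-table pattern; igt_example, run_subtests, and the SUBTEST stand-in below are invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct subtest {
	const char *name;
	int (*func)(void *arg);
};

static int igt_example(void *arg)	/* invented test body */
{
	return arg ? 0 : -1;
}

#define SUBTEST(x) { #x, x }	/* models the kernel's SUBTEST() macro */

static int run_subtests(const struct subtest *t, size_t n, void *arg)
{
	size_t i;
	int err;

	for (i = 0; i < n; i++) {
		err = t[i].func(arg);
		printf("%s: %s\n", t[i].name, err ? "FAIL" : "ok");
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	static const struct subtest tests[] = {
		SUBTEST(igt_example),
	};
	int device = 1;		/* stand-in for mock_gem_device() */

	return run_subtests(tests, sizeof(tests) / sizeof(tests[0]), &device);
}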
