drm/i915: Rename local struct intel_engine_cs variables

Done by the Coccinelle script below plus a manual
intervention to GEN8_RING_SEMAPHORE_INIT.

@@
expression E;
@@
- struct intel_engine_cs *ring = E;
+ struct intel_engine_cs *engine = E;
<+...
- ring
+ engine
...+>
@@
@@
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
<+...
- ring
+ engine
...+>

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Tvrtko Ursulin 2016-03-16 11:00:36 +00:00
Parent 08250c4ba6
Commit e2f8039147
17 changed files with 1030 additions and 1000 deletions
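As a minimal sketch of what the script does (the function below is hypothetical, not from the i915 tree): the first rule matches a declaration with an initializer, the second a bare declaration, and the <+... ...+> body of each rule renames every later use of "ring" inside the same function. Assuming a stock Coccinelle installation, the patch could be applied with something like "spatch --sp-file engine.cocci --in-place --dir drivers/gpu/drm/i915".

/* Before the semantic patch: */
static void frob_engine(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;	/* matched by the first rule */

	intel_ring_emit(ring, MI_NOOP);			/* uses inside <+... ...+> */
	intel_ring_advance(ring);			/* are renamed as well */
}

/* After running spatch: */
static void frob_engine(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->ring;

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_advance(engine);
}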

@@ -129,7 +129,7 @@ static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
int i;
@@ -143,7 +143,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
for_each_ring(ring, dev_priv, i)
for_each_ring(engine, dev_priv, i)
seq_printf(m, "%x ",
i915_gem_request_get_seqno(obj->last_read_req[i]));
seq_printf(m, "] %x %x%s%s%s",
@@ -397,15 +397,15 @@ static void print_batch_pool_stats(struct seq_file *m,
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i, j;
memset(&stats, 0, sizeof(stats));
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
for_each_ring(engine, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
&ring->batch_pool.cache_list[j],
&engine->batch_pool.cache_list[j],
batch_pool_link)
per_file_stats(0, obj, &stats);
}
@@ -591,14 +591,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
struct intel_engine_cs *ring =
i915_gem_request_get_ring(work->flip_queued_req);
struct intel_engine_cs *engine = i915_gem_request_get_ring(work->flip_queued_req);
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
ring->name,
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
ring->get_seqno(ring, true),
engine->get_seqno(engine, true),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
@@ -637,7 +636,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int total = 0;
int ret, i, j;
@@ -645,20 +644,20 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
if (ret)
return ret;
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
for_each_ring(engine, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
count = 0;
list_for_each_entry(obj,
&ring->batch_pool.cache_list[j],
&engine->batch_pool.cache_list[j],
batch_pool_link)
count++;
seq_printf(m, "%s cache[%d]: %d objects\n",
ring->name, j, count);
engine->name, j, count);
list_for_each_entry(obj,
&ring->batch_pool.cache_list[j],
&engine->batch_pool.cache_list[j],
batch_pool_link) {
seq_puts(m, " ");
describe_obj(m, obj);
@@ -681,7 +680,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any, i;
@@ -690,17 +689,17 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
return ret;
any = 0;
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
int count;
count = 0;
list_for_each_entry(req, &ring->request_list, list)
list_for_each_entry(req, &engine->request_list, list)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", ring->name, count);
list_for_each_entry(req, &ring->request_list, list) {
seq_printf(m, "%s requests: %d\n", engine->name, count);
list_for_each_entry(req, &engine->request_list, list) {
struct task_struct *task;
rcu_read_lock();
@@ -739,7 +738,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -747,8 +746,8 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
return ret;
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, i)
i915_ring_seqno_info(m, ring);
for_each_ring(engine, dev_priv, i)
i915_ring_seqno_info(m, engine);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -762,7 +761,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +933,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
ring->name, I915_READ_IMR(ring));
engine->name, I915_READ_IMR(engine));
}
i915_ring_seqno_info(m, ring);
i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -981,12 +980,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
const u32 *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
hws = ring->status_page.page_addr;
engine = &dev_priv->ring[(uintptr_t)node->info_ent->data];
hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -1331,7 +1330,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
u32 instdone[I915_NUM_INSTDONE_REG];
@@ -1344,9 +1343,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, i) {
seqno[i] = ring->get_seqno(ring, false);
acthd[i] = intel_ring_get_active_head(ring);
for_each_ring(engine, dev_priv, i) {
seqno[i] = engine->get_seqno(engine, false);
acthd[i] = intel_ring_get_active_head(engine);
}
i915_get_extra_instdone(dev, instdone);
@@ -1360,17 +1359,17 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
for_each_ring(ring, dev_priv, i) {
seq_printf(m, "%s:\n", ring->name);
for_each_ring(engine, dev_priv, i) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x]\n",
ring->hangcheck.seqno, seqno[i]);
engine->hangcheck.seqno, seqno[i]);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)ring->hangcheck.acthd,
(long long)engine->hangcheck.acthd,
(long long)acthd[i]);
seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
if (ring->id == RCS) {
if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1380,7 +1379,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
ring->hangcheck.instdone[j]);
engine->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
@@ -1946,7 +1945,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx;
int ret, i;
@@ -1966,13 +1965,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
seq_putc(m, '\n');
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[i].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
seq_printf(m, "%s: ", ring->name);
seq_printf(m, "%s: ", engine->name);
if (ctx_obj)
describe_obj(m, ctx_obj);
if (ringbuf)
@@ -2041,7 +2040,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx;
int ret, i;
@@ -2056,8 +2055,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
for_each_ring(ring, dev_priv, i)
i915_dump_lrc_obj(m, ctx, ring);
for_each_ring(engine, dev_priv, i)
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2069,7 +2068,7 @@ static int i915_execlists(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
@@ -2090,22 +2089,22 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
for_each_ring(ring, dev_priv, ring_id) {
for_each_ring(engine, dev_priv, ring_id) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
unsigned long flags;
seq_printf(m, "%s\n", ring->name);
seq_printf(m, "%s\n", engine->name);
status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
@@ -2113,24 +2112,25 @@ static int i915_execlists(struct seq_file *m, void *data)
read_pointer, write_pointer);
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
spin_lock_irqsave(&ring->execlist_lock, flags);
list_for_each(cursor, &ring->execlist_queue)
spin_lock_irqsave(&engine->execlist_lock, flags);
list_for_each(cursor, &engine->execlist_queue)
count++;
head_req = list_first_entry_or_null(&ring->execlist_queue,
struct drm_i915_gem_request, execlist_link);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
spin_unlock_irqrestore(&engine->execlist_lock, flags);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
seq_printf(m, "\tHead request id: %u\n",
intel_execlists_ctx_id(head_req->ctx, ring));
intel_execlists_ctx_id(head_req->ctx, engine));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2246,19 +2246,19 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
if (!ppgtt)
return;
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for_each_ring(engine, dev_priv, unused) {
seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
pdp <<= 32;
pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
@@ -2267,19 +2267,23 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_ring(ring, dev_priv, i) {
seq_printf(m, "%s\n", ring->name);
for_each_ring(engine, dev_priv, i) {
seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine)));
seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE_READ(engine)));
seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2334,12 +2338,12 @@ out_put:
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int count = 0;
int i;
for_each_ring(ring, i915, i)
count += ring->irq_refcount;
for_each_ring(engine, i915, i)
count += engine->irq_refcount;
return count;
}
@@ -2447,7 +2451,7 @@ static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint64_t tot = 0;
uint32_t i;
@@ -2462,11 +2466,11 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
seq_printf(m, "\tSubmissions: %llu %s\n",
client->submissions[ring->guc_id],
ring->name);
tot += client->submissions[ring->guc_id];
client->submissions[engine->guc_id],
engine->name);
tot += client->submissions[engine->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2478,7 +2482,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc guc;
struct i915_guc_client client = {};
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
enum intel_ring_id i;
u64 total = 0;
@@ -2502,11 +2506,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
ring->name, guc.submissions[ring->guc_id],
guc.last_seqno[ring->guc_id]);
total += guc.submissions[ring->guc_id];
engine->name, guc.submissions[engine->guc_id],
guc.last_seqno[engine->guc_id]);
total += guc.submissions[engine->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -3128,7 +3132,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
int i, j, ret;
@@ -3149,10 +3153,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
uint64_t offset;
seq_printf(m, "%s\n", ring->name);
seq_printf(m, "%s\n", engine->name);
seq_puts(m, " Last signal:");
for (j = 0; j < num_rings; j++) {
@@ -3174,17 +3178,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
for_each_ring(ring, dev_priv, i)
for_each_ring(engine, dev_priv, i)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
I915_READ(ring->semaphore.mbox.signal[j]));
I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
seq_puts(m, "\nSync seqno:\n");
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
for (j = 0; j < num_rings; j++) {
seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
seq_printf(m, " 0x%08x ",
engine->semaphore.sync_seqno[j]);
}
seq_putc(m, '\n');
}
@@ -3226,7 +3231,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
{
int i;
int ret;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3239,9 +3244,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
for_each_ring(ring, dev_priv, i)
for_each_ring(engine, dev_priv, i)
seq_printf(m, "HW whitelist count for %s: %d\n",
ring->name, workarounds->hw_whitelist_count[i]);
engine->name, workarounds->hw_whitelist_count[i]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;

@@ -1243,11 +1243,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
s64 *timeout,
struct intel_rps_client *rps)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
@@ -1288,7 +1288,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (ret == 0)
goto out;
if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
ret = -ENODEV;
goto out;
}
@@ -1296,7 +1296,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
prepare_to_wait(&ring->irq_queue, &wait, state);
prepare_to_wait(&engine->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
@@ -1325,11 +1325,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
timer.function = NULL;
if (timeout || missed_irq(dev_priv, ring)) {
if (timeout || missed_irq(dev_priv, engine)) {
unsigned long expire;
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
@@ -1341,9 +1341,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
}
if (!irq_test_in_progress)
ring->irq_put(ring);
engine->irq_put(engine);
finish_wait(&ring->irq_queue, &wait);
finish_wait(&engine->irq_queue, &wait);
out:
trace_i915_gem_request_wait_end(req);
@@ -2404,17 +2404,17 @@ void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
ring = i915_gem_request_get_ring(req);
engine = i915_gem_request_get_ring(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
drm_gem_object_reference(&obj->base);
obj->active |= intel_ring_flag(ring);
obj->active |= intel_ring_flag(engine);
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
i915_gem_request_assign(&obj->last_read_req[engine->id], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
@@ -2467,23 +2467,23 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
for_each_ring(ring, dev_priv, i) {
ret = intel_ring_idle(ring);
for_each_ring(engine, dev_priv, i) {
ret = intel_ring_idle(engine);
if (ret)
return ret;
}
i915_gem_retire_requests(dev);
/* Finally reset hw state */
for_each_ring(ring, dev_priv, i) {
intel_ring_init_seqno(ring, seqno);
for_each_ring(engine, dev_priv, i) {
intel_ring_init_seqno(engine, seqno);
for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
ring->semaphore.sync_seqno[j] = 0;
for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
engine->semaphore.sync_seqno[j] = 0;
}
return 0;
@@ -2542,7 +2542,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
@@ -2551,8 +2551,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
if (WARN_ON(request == NULL))
return;
ring = request->ring;
dev_priv = ring->dev->dev_private;
engine = request->ring;
dev_priv = engine->dev->dev_private;
ringbuf = request->ringbuf;
/*
@@ -2587,9 +2587,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists)
ret = ring->emit_request(request);
ret = engine->emit_request(request);
else {
ret = ring->add_request(request);
ret = engine->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
@@ -2607,13 +2607,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
request->previous_seqno = engine->last_submitted_seqno;
engine->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &engine->request_list);
trace_i915_gem_request_add(request);
i915_queue_hangcheck(ring->dev);
i915_queue_hangcheck(engine->dev);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2885,7 +2885,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
/*
@@ -2893,11 +2893,11 @@ void i915_gem_reset(struct drm_device *dev)
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_status(dev_priv, ring);
for_each_ring(engine, dev_priv, i)
i915_gem_reset_ring_status(dev_priv, engine);
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, ring);
for_each_ring(engine, dev_priv, i)
i915_gem_reset_ring_cleanup(dev_priv, engine);
i915_gem_context_reset(dev);
@@ -2962,19 +2962,19 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool idle = true;
int i;
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
for_each_ring(engine, dev_priv, i) {
i915_gem_retire_requests_ring(engine);
idle &= list_empty(&engine->request_list);
if (i915.enable_execlists) {
spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irq(&ring->execlist_lock);
spin_lock_irq(&engine->execlist_lock);
idle &= list_empty(&engine->execlist_queue);
spin_unlock_irq(&engine->execlist_lock);
intel_execlists_retire_requests(ring);
intel_execlists_retire_requests(engine);
}
}
@@ -3025,11 +3025,11 @@ i915_gem_idle_work_handler(struct work_struct *work)
intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) {
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
i915_gem_batch_pool_fini(&ring->batch_pool);
for_each_ring(engine, dev_priv, i)
i915_gem_batch_pool_fini(&engine->batch_pool);
mutex_unlock(&dev->struct_mutex);
}
@@ -3391,15 +3391,15 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int ret, i;
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -3412,7 +3412,7 @@ int i915_gpu_idle(struct drm_device *dev)
i915_add_request_no_flush(req);
}
ret = intel_ring_idle(ring);
ret = intel_ring_idle(engine);
if (ret)
return ret;
}
@@ -4656,11 +4656,11 @@ static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
dev_priv->gt.stop_ring(ring);
for_each_ring(engine, dev_priv, i)
dev_priv->gt.stop_ring(engine);
}
int
@@ -4697,8 +4697,8 @@ err:
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev;
struct intel_engine_cs *engine = req->ring;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
@@ -4716,12 +4716,12 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i]);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
intel_ring_emit(engine, remap_info[i]);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return ret;
}
@@ -4829,7 +4829,7 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int ret, i, j;
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4876,8 +4876,8 @@ i915_gem_init_hw(struct drm_device *dev)
}
/* Need to do basic initialisation of all rings first: */
for_each_ring(ring, dev_priv, i) {
ret = ring->init_hw(ring);
for_each_ring(engine, dev_priv, i) {
ret = engine->init_hw(engine);
if (ret)
goto out;
}
@@ -4901,17 +4901,17 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
/* Now it is safe to go back round and do everything else: */
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
if (ring->id == RCS) {
if (engine->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
i915_gem_l3_remap(req, j);
}
@@ -5006,11 +5006,11 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
for_each_ring(engine, dev_priv, i)
dev_priv->gt.cleanup_ring(engine);
if (i915.enable_execlists)
/*

@@ -346,11 +346,11 @@ void i915_gem_context_reset(struct drm_device *dev)
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_engine_cs *engine = &dev_priv->ring[i];
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
@@ -427,11 +427,11 @@ void i915_gem_context_fini(struct drm_device *dev)
}
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_engine_cs *engine = &dev_priv->ring[i];
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
@@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
if (i915.enable_execlists) {
if (ring->init_context == NULL)
if (engine->init_context == NULL)
return 0;
ret = ring->init_context(req);
ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
@@ -510,12 +510,12 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(ring->dev) ?
hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
i915_semaphore_is_enabled(engine->dev) ?
hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
0;
int len, i, ret;
@@ -524,21 +524,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (IS_GEN6(engine->dev)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(ring->dev)->gen < 8)
else if (INTEL_INFO(engine->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(engine->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(req, len);
@@ -546,49 +546,56 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (INTEL_INFO(engine->dev)->gen >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(engine->dev), i) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_ring_emit_reg(engine,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(engine,
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_SET_CONTEXT);
intel_ring_emit(engine,
i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
if (INTEL_INFO(ring->dev)->gen >= 7) {
if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(engine->dev), i) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_ring_emit_reg(engine,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(engine,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return ret;
}
@@ -648,25 +655,26 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
static int do_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_context *from = engine->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
if (from != NULL && engine == &dev_priv->ring[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
if (should_skip_switch(ring, from, to))
if (should_skip_switch(engine, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
if (engine == &dev_priv->ring[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
get_context_alignment(engine->dev),
0);
if (ret)
return ret;
}
@@ -676,23 +684,23 @@ static int do_switch(struct drm_i915_gem_request *req)
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
from = ring->last_context;
from = engine->last_context;
if (needs_pd_load_pre(ring, to)) {
if (needs_pd_load_pre(engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
}
if (ring != &dev_priv->ring[RCS]) {
if (engine != &dev_priv->ring[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
@@ -717,14 +725,14 @@ static int do_switch(struct drm_i915_gem_request *req)
* space. This means we must enforce that a page table load
* occur when this occurs. */
} else if (to->ppgtt &&
(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
(intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
}
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
WARN_ON(needs_pd_load_pre(engine, to) &&
needs_pd_load_post(engine, to, hw_flags));
ret = mi_set_context(req, hw_flags);
if (ret)
@@ -733,8 +741,8 @@ static int do_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
if (needs_pd_load_post(engine, to, hw_flags)) {
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
@@ -787,11 +795,11 @@ static int do_switch(struct drm_i915_gem_request *req)
done:
i915_gem_context_reference(to);
ring->last_context = to;
engine->last_context = to;
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(req);
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
@@ -800,7 +808,7 @@ done:
return 0;
unpin_out:
if (ring->id == RCS)
if (engine->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -820,18 +828,18 @@ unpin_out:
*/
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (req->ctx != ring->last_context) {
if (req->ctx != engine->last_context) {
i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = req->ctx;
if (engine->last_context)
i915_gem_context_unreference(engine->last_context);
engine->last_context = req->ctx;
}
return 0;
}

@@ -36,29 +36,30 @@ i915_verify_lists(struct drm_device *dev)
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int err = 0;
int i;
if (warned)
return 0;
for_each_ring(ring, dev_priv, i) {
list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
for_each_ring(engine, dev_priv, i) {
list_for_each_entry(obj, &engine->active_list,
ring_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
break;
} else if (!obj->active ||
obj->last_read_req[ring->id] == NULL) {
obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
ring->name,
engine->name,
obj, obj->base.write_domain);
err++;
}

@@ -1095,7 +1095,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct intel_engine_cs *engine = i915_gem_request_get_ring(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1122,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1146,11 +1146,11 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
if (!IS_GEN7(dev) || engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1160,12 +1160,12 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(engine, 0);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return 0;
}
@@ -1229,7 +1229,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct intel_engine_cs *engine = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
@@ -1244,8 +1244,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
"%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1253,7 +1253,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@@ -1280,17 +1280,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
if (ring == &dev_priv->ring[RCS] &&
if (engine == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, INSTPM);
intel_ring_emit(engine, instp_mask << 16 | instp_mode);
intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
@@ -1308,7 +1308,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
ret = ring->dispatch_execbuffer(params->request,
ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
@@ -1432,7 +1432,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1459,7 +1459,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
ret = eb_select_ring(dev_priv, file, args, &ring);
ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
@@ -1473,9 +1473,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
if (ring->id != RCS) {
if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
ring->name);
engine->name);
return -EINVAL;
}
@@ -1488,7 +1488,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
@@ -1522,7 +1522,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
&need_relocs);
if (ret)
goto err;
@@ -1531,7 +1532,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
@@ -1547,16 +1549,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
parsed_batch_obj = i915_gem_execbuffer_parse(engine,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
@@ -1608,7 +1610,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
req = i915_gem_request_alloc(ring, ctx);
req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
@@ -1626,7 +1628,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->dev = dev;
params->file = file;
params->ring = ring;
params->ring = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;

@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
intel_ring_emit(engine, upper_32_bits(addr));
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
intel_ring_emit(engine, lower_32_bits(addr));
intel_ring_advance(engine);
return 0;
}
@@ -1650,11 +1650,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1662,13 +1662,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
intel_ring_emit(engine, PP_DIR_DCLV_2G);
intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
intel_ring_emit(engine, get_pd_offset(ppgtt));
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return 0;
}
@@ -1676,22 +1676,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1699,17 +1699,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
intel_ring_emit(engine, PP_DIR_DCLV_2G);
intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
intel_ring_emit(engine, get_pd_offset(ppgtt));
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (ring->id != RCS) {
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (engine->id != RCS) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1720,15 +1720,15 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_DCLV(ring));
POSTING_READ(RING_PP_DIR_DCLV(engine));
return 0;
}
@@ -1736,12 +1736,12 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int j;
for_each_ring(ring, dev_priv, j) {
for_each_ring(engine, dev_priv, j) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
I915_WRITE(RING_MODE_GEN7(ring),
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
@@ -1749,7 +1749,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
int i;
@@ -1765,9 +1765,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(ring),
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -2286,15 +2286,15 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
if (INTEL_INFO(dev)->gen < 6)
return;
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(ring));
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
@@ -2305,7 +2305,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
I915_WRITE(RING_FAULT_REG(ring),
I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}

@@ -495,9 +495,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (obj) {
u64 wa_ctx_offset = obj->gtt_offset;
u32 *wa_ctx_page = &obj->pages[0][0];
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
u32 wa_ctx_size = (ring->wa_ctx.indirect_ctx.size +
ring->wa_ctx.per_ctx.size);
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
engine->wa_ctx.per_ctx.size);
err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
dev_priv->ring[i].name, wa_ctx_offset);
@@ -1019,19 +1019,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_engine_cs *engine = &dev_priv->ring[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
if (ring->dev == NULL)
if (engine->dev == NULL)
continue;
error->ring[i].valid = true;
i915_record_ring_state(dev, error, ring, &error->ring[i]);
i915_record_ring_state(dev, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(ring);
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
@@ -1051,7 +1051,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
@@ -1073,11 +1073,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
* executed).
*/
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
rbuf = request->ctx->engine[engine->id].ringbuf;
else
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
} else
rbuf = ring->buffer;
rbuf = engine->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
@@ -1086,18 +1086,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_error_ggtt_object_create(dev_priv,
engine->status_page.obj);
if (ring->wa_ctx.obj) {
if (engine->wa_ctx.obj) {
error->ring[i].wa_ctx =
i915_error_ggtt_object_create(dev_priv,
ring->wa_ctx.obj);
engine->wa_ctx.obj);
}
i915_gem_record_active_context(ring, error, &error->ring[i]);
i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
list_for_each_entry(request, &ring->request_list, list)
list_for_each_entry(request, &engine->request_list, list)
count++;
error->ring[i].num_requests = count;
@ -1110,7 +1111,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
count = 0;
list_for_each_entry(request, &ring->request_list, list) {
list_for_each_entry(request, &engine->request_list, list) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {

View file

@ -377,7 +377,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
for_each_ring(ring, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
for_each_ring(engine, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@ -406,14 +406,14 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
if (!obj)
break; /* XXX: continue? */
ctx_desc = intel_lr_context_descriptor(ctx, ring);
ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[i].ringbuf->obj;
@ -422,7 +422,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
desc.engines_used |= (1 << ring->guc_id);
desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
@ -839,7 +839,7 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct page *page;
u32 size, i;
@ -867,11 +867,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
engine = &dev_priv->ring[RCS];
ads->golden_context_lrca = engine->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
for_each_ring(engine, dev_priv, i)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@ -883,12 +883,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
for_each_ring(engine, dev_priv, i) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[ring->guc_id].count = 0;
reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +

View file

@ -1079,11 +1079,11 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
if (ring->irq_refcount)
for_each_ring(engine, dev_priv, i)
if (engine->irq_refcount)
return true;
return false;
@ -2449,7 +2449,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
/*
@ -2460,8 +2460,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
for_each_ring(engine, dev_priv, i)
wake_up_all(&engine->irq_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
@ -2956,11 +2956,11 @@ static int semaphore_passed(struct intel_engine_cs *ring)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = 0;
for_each_ring(engine, dev_priv, i)
engine->hangcheck.deadlock = 0;
}
static bool subunits_stuck(struct intel_engine_cs *ring)
@ -3071,7 +3071,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
@ -3096,33 +3096,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
u64 acthd;
u32 seqno;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
seqno = ring->get_seqno(ring, false);
acthd = intel_ring_get_active_head(ring);
seqno = engine->get_seqno(engine, false);
acthd = intel_ring_get_active_head(engine);
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&ring->irq_queue)) {
if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
ring->name);
wake_up_all(&ring->irq_queue);
engine->name);
wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */
ring->hangcheck.score += BUSY;
engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
@ -3141,53 +3141,53 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
ring->hangcheck.action = ring_stuck(ring,
acthd);
engine->hangcheck.action = ring_stuck(engine,
acthd);
switch (ring->hangcheck.action) {
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
break;
case HANGCHECK_ACTIVE:
ring->hangcheck.score += BUSY;
engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
ring->hangcheck.score += KICK;
engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
ring->hangcheck.score += HUNG;
engine->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;
engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score -= ACTIVE_DECAY;
if (ring->hangcheck.score < 0)
ring->hangcheck.score = 0;
if (engine->hangcheck.score > 0)
engine->hangcheck.score -= ACTIVE_DECAY;
if (engine->hangcheck.score < 0)
engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = 0;
engine->hangcheck.acthd = 0;
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}
ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
busy_count += busy;
}
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
for_each_ring(engine, dev_priv, i) {
if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
engine->name);
rings_hung++;
}
}

View file

@ -10984,7 +10984,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@ -11000,13 +11000,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@ -11019,7 +11019,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@ -11032,13 +11032,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@ -11051,7 +11051,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@ -11065,10 +11065,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
intel_ring_emit(engine, fb->pitches[0]);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@ -11077,7 +11077,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@ -11090,7 +11090,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@ -11100,10 +11100,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
if (ret)
return ret;
intel_ring_emit(ring, MI_DISPLAY_FLIP |
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@ -11113,7 +11113,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@ -11126,7 +11126,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@ -11147,7 +11147,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
}
len = 4;
if (ring->id == RCS) {
if (engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
@ -11185,30 +11185,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (ring->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (engine->id == RCS) {
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, DERRMR);
intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_emit_reg(engine, DERRMR);
intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, 0);
intel_ring_emit(engine, MI_NOOP);
}
}
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@ -11488,7 +11488,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
@ -11575,21 +11575,21 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
engine = &dev_priv->ring[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
engine = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = i915_gem_request_get_ring(obj->last_write_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
engine = i915_gem_request_get_ring(obj->last_write_req);
if (engine == NULL || engine->id != RCS)
engine = &dev_priv->ring[BCS];
} else {
ring = &dev_priv->ring[RCS];
engine = &dev_priv->ring[RCS];
}
mmio_flip = use_mmio_flip(ring, obj);
mmio_flip = use_mmio_flip(engine, obj);
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
@ -11597,7 +11597,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, ring, &request);
ret = i915_gem_object_sync(obj, engine, &request);
if (ret)
goto cleanup_pending;
}
@ -11619,7 +11619,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj->last_write_req);
} else {
if (!request) {
request = i915_gem_request_alloc(ring, NULL);
request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;

View file

@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i, irqs;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |

View file

@ -360,8 +360,8 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct intel_engine_cs *ring = rq0->ring;
struct drm_device *dev = ring->dev;
struct intel_engine_cs *engine = rq0->ring;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t desc[2];
@ -376,15 +376,15 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
rq0->elsp_submitted++;
/* You must always write both descriptors in the order below. */
I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
/* ELSP is a wo register, use another nearby reg for posting */
POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}
static void
@ -398,9 +398,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static void execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_engine_cs *ring = rq->ring;
struct intel_engine_cs *engine = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
@ -609,25 +609,25 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
static void execlists_context_queue(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = request->ring;
struct intel_engine_cs *engine = request->ring;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request->ctx, ring);
intel_lr_context_pin(request->ctx, engine);
i915_gem_request_reference(request);
spin_lock_irq(&ring->execlist_lock);
spin_lock_irq(&engine->execlist_lock);
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
if (num_elements > 2) {
struct drm_i915_gem_request *tail_req;
tail_req = list_last_entry(&ring->execlist_queue,
tail_req = list_last_entry(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
@ -635,32 +635,32 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_move_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
&engine->execlist_retired_req_list);
}
}
list_add_tail(&request->execlist_link, &ring->execlist_queue);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
execlists_context_unqueue(engine);
spin_unlock_irq(&ring->execlist_lock);
spin_unlock_irq(&engine->execlist_lock);
}
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
uint32_t flush_domains;
int ret;
flush_domains = 0;
if (ring->gpu_caches_dirty)
if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
ring->gpu_caches_dirty = false;
engine->gpu_caches_dirty = false;
return 0;
}
@ -726,7 +726,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
int bytes)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct drm_i915_gem_request *target;
unsigned space;
int ret;
@ -737,7 +737,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
/* The whole point of reserving space is to not wait! */
WARN_ON(ringbuf->reserved_in_use);
list_for_each_entry(target, &ring->request_list, list) {
list_for_each_entry(target, &engine->request_list, list) {
/*
* The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that
@ -753,7 +753,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
break;
}
if (WARN_ON(&target->list == &ring->request_list))
if (WARN_ON(&target->list == &engine->request_list))
return -ENOSPC;
ret = i915_wait_request(target);
@ -947,9 +947,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct intel_engine_cs *engine = params->ring;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
u32 instp_mask;
@ -961,7 +961,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
if (instp_mode != 0 && engine != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@ -990,7 +990,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
if (ring == &dev_priv->ring[RCS] &&
if (engine == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_logical_ring_begin(params->request, 4);
if (ret)
@ -1008,7 +1008,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
args->batch_start_offset;
ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
@ -1071,17 +1071,17 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
int ret;
if (!ring->gpu_caches_dirty)
if (!engine->gpu_caches_dirty)
return 0;
ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
ring->gpu_caches_dirty = false;
engine->gpu_caches_dirty = false;
return 0;
}
@ -1172,16 +1172,16 @@ void intel_lr_context_unpin(struct intel_context *ctx,
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
struct drm_device *dev = ring->dev;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
ring->gpu_caches_dirty = true;
engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
@ -1199,7 +1199,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
@ -1643,7 +1643,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->ring;
struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
@ -1656,9 +1656,11 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
intel_logical_ring_emit_reg(ringbuf,
GEN8_RING_PDP_UDW(engine, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
intel_logical_ring_emit_reg(ringbuf,
GEN8_RING_PDP_LDW(engine, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
@ -1748,8 +1750,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 unused)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct intel_engine_cs *engine = ringbuf->ring;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@ -1769,7 +1771,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
if (ring == &dev_priv->ring[VCS])
if (engine == &dev_priv->ring[VCS])
cmd |= MI_INVALIDATE_BSD;
}
@ -1789,8 +1791,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 flush_domains)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
struct intel_engine_cs *engine = ringbuf->ring;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false;
u32 flags = 0;
int ret;
@ -1818,7 +1820,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
if (IS_GEN9(ring->dev))
if (IS_GEN9(engine->dev))
vf_flush_wa = true;
}
@ -2109,38 +2111,38 @@ error:
static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
int ret;
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->guc_id = GUC_RENDER_ENGINE;
ring->mmio_base = RENDER_RING_BASE;
engine->name = "render ring";
engine->id = RCS;
engine->exec_id = I915_EXEC_RENDER;
engine->guc_id = GUC_RENDER_ENGINE;
engine->mmio_base = RENDER_RING_BASE;
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
logical_ring_default_vfuncs(dev, ring);
logical_ring_default_vfuncs(dev, engine);
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
engine->init_hw = gen9_init_render_ring;
else
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
ring->emit_flush = gen8_emit_flush_render;
ring->emit_request = gen8_emit_request_render;
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->cleanup = intel_fini_pipe_control;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
ring->dev = dev;
engine->dev = dev;
ret = intel_init_pipe_control(ring);
ret = intel_init_pipe_control(engine);
if (ret)
return ret;
ret = intel_init_workaround_bb(ring);
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
* We continue even if we fail to initialize WA batch
@ -2151,9 +2153,9 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
ret = logical_ring_init(dev, ring);
ret = logical_ring_init(dev, engine);
if (ret) {
lrc_destroy_wa_ctx_obj(ring);
lrc_destroy_wa_ctx_obj(engine);
}
return ret;
@ -2162,69 +2164,69 @@ static int logical_render_ring_init(struct drm_device *dev)
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS];
struct intel_engine_cs *engine = &dev_priv->ring[VCS];
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
ring->guc_id = GUC_VIDEO_ENGINE;
ring->mmio_base = GEN6_BSD_RING_BASE;
engine->name = "bsd ring";
engine->id = VCS;
engine->exec_id = I915_EXEC_BSD;
engine->guc_id = GUC_VIDEO_ENGINE;
engine->mmio_base = GEN6_BSD_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, engine);
return logical_ring_init(dev, ring);
return logical_ring_init(dev, engine);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
struct intel_engine_cs *engine = &dev_priv->ring[VCS2];
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
ring->guc_id = GUC_VIDEO_ENGINE2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
engine->name = "bsd2 ring";
engine->id = VCS2;
engine->exec_id = I915_EXEC_BSD;
engine->guc_id = GUC_VIDEO_ENGINE2;
engine->mmio_base = GEN8_BSD2_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, engine);
return logical_ring_init(dev, ring);
return logical_ring_init(dev, engine);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[BCS];
struct intel_engine_cs *engine = &dev_priv->ring[BCS];
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
ring->guc_id = GUC_BLITTER_ENGINE;
ring->mmio_base = BLT_RING_BASE;
engine->name = "blitter ring";
engine->id = BCS;
engine->exec_id = I915_EXEC_BLT;
engine->guc_id = GUC_BLITTER_ENGINE;
engine->mmio_base = BLT_RING_BASE;
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, engine);
return logical_ring_init(dev, ring);
return logical_ring_init(dev, engine);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VECS];
struct intel_engine_cs *engine = &dev_priv->ring[VECS];
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
ring->mmio_base = VEBOX_RING_BASE;
engine->name = "video enhancement ring";
engine->id = VECS;
engine->exec_id = I915_EXEC_VEBOX;
engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
engine->mmio_base = VEBOX_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, engine);
return logical_ring_init(dev, ring);
return logical_ring_init(dev, engine);
}
/**
@ -2639,14 +2641,14 @@ void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int i;
for_each_ring(ring, dev_priv, i) {
for_each_ring(engine, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[ring->id].ringbuf;
ctx->engine[engine->id].ringbuf;
uint32_t *reg_state;
struct page *page;

View file

@ -324,11 +324,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
if (get_mocs_settings(req->ring->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
enum intel_ring_id ring_id;
/* Program the control registers */
for_each_ring(ring, dev_priv, ring_id) {
for_each_ring(engine, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
if (ret)
return ret;

View file

@ -233,14 +233,14 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@ -267,7 +267,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@ -283,7 +283,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
return ret;
}
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_advance(engine);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
@ -336,7 +336,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@ -349,7 +349,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
@ -360,22 +360,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
} else {
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
@ -408,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->ring[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@ -423,7 +424,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
@ -433,9 +434,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return ret;
}
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);

View file

@ -4815,7 +4815,7 @@ static void gen9_enable_rps(struct drm_device *dev)
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
@ -4838,8 +4838,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@ -4885,7 +4885,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;
@ -4906,8 +4906,8 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@ -4967,7 +4967,7 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@ -5003,8 +5003,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@ -5495,7 +5495,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
int i;
@ -5522,8 +5522,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@ -5593,7 +5593,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
@ -5633,8 +5633,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_ring(engine, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@ -6010,7 +6010,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool ret = false;
int i;
@ -6019,8 +6019,8 @@ bool i915_gpu_busy(void)
goto out_unlock;
dev_priv = i915_mch_dev;
for_each_ring(ring, dev_priv, i)
ret |= !list_empty(&ring->request_list);
for_each_ring(engine, dev_priv, i)
ret |= !list_empty(&engine->request_list);
out_unlock:
spin_unlock_irq(&mchdev_lock);

The diff for this file is not shown because of its large size. Load diff

View file

@ -63,16 +63,16 @@ struct intel_hw_status_page {
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
#define GEN8_RING_SEMAPHORE_INIT do { \
#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
enum intel_ring_hangcheck_action {
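
Note on the macro hunk above: GEN8_RING_SEMAPHORE_INIT is the one change the semantic patch could not make mechanically, because the old macro body expanded against a local variable literally named "ring" in the caller's scope; the rewritten macro instead takes the engine pointer as an explicit parameter (e). A minimal, hypothetical caller sketch follows — the function name is illustrative and not from the patch, and dev_priv must still be in scope because the macro body (via GEN8_SIGNAL_OFFSET) reads dev_priv->semaphore_obj:

static void example_init_semaphores(struct drm_i915_private *dev_priv,
				    struct intel_engine_cs *engine)
{
	/* Old call site was bare "GEN8_RING_SEMAPHORE_INIT;", which only
	 * compiled if the caller's local happened to be named "ring".
	 * The parameterized form works with any variable name. */
	GEN8_RING_SEMAPHORE_INIT(engine);
}

Passing the engine explicitly decouples the macro from the caller's choice of local variable name, which is what lets the ring→engine rename proceed without touching the macro's expansion sites again.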