drm/i915: More renaming of rings to engines
This time using only sed and a few by hand.

v2: Rename also intel_ring_id and intel_ring_initialized.
v3: Fixed typo in intel_ring_initialized.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1458126040-33105-1-git-send-email-tvrtko.ursulin@linux.intel.com
Parent: 666796da7a
Commit: 117897f42c
@@ -2483,7 +2483,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 struct intel_guc guc;
 struct i915_guc_client client = {};
 struct intel_engine_cs *engine;
-enum intel_ring_id i;
+enum intel_engine_id i;
 u64 total = 0;
 
 if (!HAS_GUC_SCHED(dev_priv->dev))

@@ -87,16 +87,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
 value = 1;
 break;
 case I915_PARAM_HAS_BSD:
-value = intel_ring_initialized(&dev_priv->engine[VCS]);
+value = intel_engine_initialized(&dev_priv->engine[VCS]);
 break;
 case I915_PARAM_HAS_BLT:
-value = intel_ring_initialized(&dev_priv->engine[BCS]);
+value = intel_engine_initialized(&dev_priv->engine[BCS]);
 break;
 case I915_PARAM_HAS_VEBOX:
-value = intel_ring_initialized(&dev_priv->engine[VECS]);
+value = intel_engine_initialized(&dev_priv->engine[VECS]);
 break;
 case I915_PARAM_HAS_BSD2:
-value = intel_ring_initialized(&dev_priv->engine[VCS2]);
+value = intel_engine_initialized(&dev_priv->engine[VCS2]);
 break;
 case I915_PARAM_HAS_RELAXED_FENCING:
 value = 1;

@@ -444,7 +444,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_gem:
 mutex_lock(&dev->struct_mutex);
-i915_gem_cleanup_ringbuffer(dev);
+i915_gem_cleanup_engines(dev);
 i915_gem_context_fini(dev);
 mutex_unlock(&dev->struct_mutex);
 cleanup_irq:

@@ -1261,7 +1261,7 @@ int i915_driver_unload(struct drm_device *dev)
 
 intel_guc_ucode_fini(dev);
 mutex_lock(&dev->struct_mutex);
-i915_gem_cleanup_ringbuffer(dev);
+i915_gem_cleanup_engines(dev);
 i915_gem_context_fini(dev);
 mutex_unlock(&dev->struct_mutex);
 intel_fbc_cleanup_cfb(dev_priv);

@@ -1931,9 +1931,9 @@ struct drm_i915_private {
 int (*execbuf_submit)(struct i915_execbuffer_params *params,
 struct drm_i915_gem_execbuffer2 *args,
 struct list_head *vmas);
-int (*init_rings)(struct drm_device *dev);
-void (*cleanup_ring)(struct intel_engine_cs *ring);
-void (*stop_ring)(struct intel_engine_cs *ring);
+int (*init_engines)(struct drm_device *dev);
+void (*cleanup_engine)(struct intel_engine_cs *engine);
+void (*stop_engine)(struct intel_engine_cs *engine);
 } gt;
 
 struct intel_context *kernel_context;

@@ -1969,7 +1969,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterate over initialised rings */
 #define for_each_engine(ring__, dev_priv__, i__) \
 for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
-for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_ring_initialized((ring__))))
+for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
 
 enum hdmi_force_audio {
 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
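The for_each_engine() hunk above is the core of the iterator rename: the macro walks the fixed-size engine array and uses intel_engine_initialized() (which just checks engine->dev != NULL) to skip engines the platform does not have. A standalone toy model of the same pattern is sketched below; the toy_* names are invented for illustration and are not the real i915 types.

/* Standalone sketch of the for_each_engine()/intel_engine_initialized()
 * pattern: walk a fixed engine array and skip slots never set up. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum toy_engine_id { TOY_RCS = 0, TOY_BCS, TOY_VCS, TOY_VECS, TOY_NUM_ENGINES };

struct toy_engine {
	const char *name;
	void *dev;		/* non-NULL once the engine is initialised */
};

struct toy_private {
	struct toy_engine engine[TOY_NUM_ENGINES];
};

static bool toy_engine_initialized(const struct toy_engine *engine)
{
	return engine->dev != NULL;	/* mirrors engine->dev != NULL in the patch */
}

/* Same shape as for_each_engine(): outer index loop, inner guard that
 * binds the engine pointer and skips uninitialised slots. */
#define toy_for_each_engine(e__, priv__, i__)				\
	for ((i__) = 0; (i__) < TOY_NUM_ENGINES; (i__)++)		\
		if ((((e__) = &(priv__)->engine[(i__)]), !toy_engine_initialized(e__))) {} else

int main(void)
{
	static int dummy_dev;
	struct toy_private priv = {
		.engine = {
			[TOY_RCS] = { "render", &dummy_dev },
			[TOY_BCS] = { "blitter", &dummy_dev },
			/* TOY_VCS / TOY_VECS left uninitialised on purpose */
		},
	};
	struct toy_engine *engine;
	int i;

	toy_for_each_engine(engine, &priv, i)
		printf("engine %d: %s\n", i, engine->name);

	return 0;
}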
@@ -2039,7 +2039,7 @@ struct drm_i915_gem_object {
 struct drm_mm_node *stolen;
 struct list_head global_list;
 
-struct list_head ring_list[I915_NUM_ENGINES];
+struct list_head engine_list[I915_NUM_ENGINES];
 /** Used in execbuf to temporarily hold a ref */
 struct list_head obj_exec_link;
 

@@ -3002,11 +3002,11 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,

@@ -2413,7 +2413,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 drm_gem_object_reference(&obj->base);
 obj->active |= intel_engine_flag(engine);
 
-list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
 list_move_tail(&vma->vm_link, &vma->vm->active_list);

@@ -2437,7 +2437,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 RQ_BUG_ON(obj->last_read_req[ring] == NULL);
 RQ_BUG_ON(!(obj->active & (1 << ring)));
 
-list_del_init(&obj->ring_list[ring]);
+list_del_init(&obj->engine_list[ring]);
 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
 if (obj->last_write_req && obj->last_write_req->engine->id == ring)

@@ -2830,7 +2830,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 
 obj = list_first_entry(&engine->active_list,
 struct drm_i915_gem_object,
-ring_list[engine->id]);
+engine_list[engine->id]);
 
 i915_gem_object_retire__read(obj, engine->id);
 }

@@ -2941,7 +2941,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 
 obj = list_first_entry(&engine->active_list,
 struct drm_i915_gem_object,
-ring_list[engine->id]);
+engine_list[engine->id]);
 
 if (!list_empty(&obj->last_read_req[engine->id]->list))
 break;

@@ -4448,7 +4448,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 INIT_LIST_HEAD(&obj->global_list);
 for (i = 0; i < I915_NUM_ENGINES; i++)
-INIT_LIST_HEAD(&obj->ring_list[i]);
+INIT_LIST_HEAD(&obj->engine_list[i]);
 INIT_LIST_HEAD(&obj->obj_exec_link);
 INIT_LIST_HEAD(&obj->vma_list);
 INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4653,14 +4653,14 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 }
 
 static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_engine_cs *engine;
 int i;
 
 for_each_engine(engine, dev_priv, i)
-dev_priv->gt.stop_ring(engine);
+dev_priv->gt.stop_engine(engine);
 }
 
 int

@@ -4676,7 +4676,7 @@ i915_gem_suspend(struct drm_device *dev)
 
 i915_gem_retire_requests(dev);
 
-i915_gem_stop_ringbuffers(dev);
+i915_gem_stop_engines(dev);
 mutex_unlock(&dev->struct_mutex);
 
 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

@@ -4778,7 +4778,7 @@ static void init_unused_rings(struct drm_device *dev)
 }
 }
 
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 int ret;

@@ -4814,13 +4814,13 @@ int i915_gem_init_rings(struct drm_device *dev)
 return 0;
 
 cleanup_vebox_ring:
-intel_cleanup_ring_buffer(&dev_priv->engine[VECS]);
+intel_cleanup_engine(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-intel_cleanup_ring_buffer(&dev_priv->engine[BCS]);
+intel_cleanup_engine(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-intel_cleanup_ring_buffer(&dev_priv->engine[VCS]);
+intel_cleanup_engine(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-intel_cleanup_ring_buffer(&dev_priv->engine[RCS]);
+intel_cleanup_engine(&dev_priv->engine[RCS]);
 
 return ret;
 }
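The cleanup_*_ring labels in the hunk above are the usual kernel error-unwind idiom, now calling the renamed intel_cleanup_engine() helper. A compact standalone sketch of that idiom follows; the names and the simulated failure are illustrative only, not the driver's code.

/* Standalone sketch of the goto-unwind style used in i915_gem_init_engines():
 * each engine that comes up successfully gets a cleanup label, and a later
 * failure jumps to the label that tears down everything set up so far,
 * in reverse order. */
#include <stdio.h>

static int init_engine(const char *name, int fail)
{
	if (fail) {
		printf("init %s: failed\n", name);
		return -1;
	}
	printf("init %s\n", name);
	return 0;
}

static void cleanup_engine(const char *name)
{
	printf("cleanup %s\n", name);
}

static int toy_init_engines(void)
{
	int ret;

	ret = init_engine("render", 0);
	if (ret)
		return ret;

	ret = init_engine("bsd", 0);
	if (ret)
		goto cleanup_render;

	ret = init_engine("blt", 0);
	if (ret)
		goto cleanup_bsd;

	ret = init_engine("vebox", 1);	/* simulate a failure to show the unwind */
	if (ret)
		goto cleanup_blt;

	return 0;

cleanup_blt:
	cleanup_engine("blt");
cleanup_bsd:
	cleanup_engine("bsd");
cleanup_render:
	cleanup_engine("render");
	return ret;
}

int main(void)
{
	return toy_init_engines() ? 1 : 0;
}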
@@ -4907,7 +4907,7 @@ i915_gem_init_hw(struct drm_device *dev)
 req = i915_gem_request_alloc(engine, NULL);
 if (IS_ERR(req)) {
 ret = PTR_ERR(req);
-i915_gem_cleanup_ringbuffer(dev);
+i915_gem_cleanup_engines(dev);
 goto out;
 }
 

@@ -4920,7 +4920,7 @@ i915_gem_init_hw(struct drm_device *dev)
 if (ret && ret != -EIO) {
 DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
 i915_gem_request_cancel(req);
-i915_gem_cleanup_ringbuffer(dev);
+i915_gem_cleanup_engines(dev);
 goto out;
 }
 

@@ -4928,7 +4928,7 @@ i915_gem_init_hw(struct drm_device *dev)
 if (ret && ret != -EIO) {
 DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
 i915_gem_request_cancel(req);
-i915_gem_cleanup_ringbuffer(dev);
+i915_gem_cleanup_engines(dev);
 goto out;
 }
 

@@ -4952,14 +4952,14 @@ int i915_gem_init(struct drm_device *dev)
 
 if (!i915.enable_execlists) {
 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-dev_priv->gt.init_rings = i915_gem_init_rings;
-dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+dev_priv->gt.init_engines = i915_gem_init_engines;
+dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+dev_priv->gt.stop_engine = intel_stop_engine;
 } else {
 dev_priv->gt.execbuf_submit = intel_execlists_submission;
-dev_priv->gt.init_rings = intel_logical_rings_init;
-dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
-dev_priv->gt.stop_ring = intel_logical_ring_stop;
+dev_priv->gt.init_engines = intel_logical_rings_init;
+dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+dev_priv->gt.stop_engine = intel_logical_ring_stop;
 }
 
 /* This is just a security blanket to placate dragons.
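The i915_gem_init() hunk above renames the members of the dev_priv->gt function-pointer table that abstracts over legacy ringbuffer and execlists submission. The sketch below is a standalone toy version of that pattern, with made-up names and trivial bodies, showing the table being filled once at init time and then called through uniformly afterwards.

/* Standalone sketch of a submission-mode vtable: pick a set of engine
 * callbacks once, then always call them through the table. */
#include <stdbool.h>
#include <stdio.h>

struct toy_engine { const char *name; };

struct toy_gt_ops {
	int  (*init_engines)(void);
	void (*cleanup_engine)(struct toy_engine *engine);
	void (*stop_engine)(struct toy_engine *engine);
};

static int  legacy_init(void)                       { puts("legacy init");    return 0; }
static void legacy_cleanup(struct toy_engine *e)    { printf("legacy cleanup %s\n", e->name); }
static void legacy_stop(struct toy_engine *e)       { printf("legacy stop %s\n", e->name); }
static int  execlists_init(void)                    { puts("execlists init"); return 0; }
static void execlists_cleanup(struct toy_engine *e) { printf("execlists cleanup %s\n", e->name); }
static void execlists_stop(struct toy_engine *e)    { printf("execlists stop %s\n", e->name); }

int main(void)
{
	bool enable_execlists = true;	/* stands in for i915.enable_execlists */
	struct toy_gt_ops gt;
	struct toy_engine rcs = { "rcs" };

	if (!enable_execlists) {
		gt.init_engines   = legacy_init;
		gt.cleanup_engine = legacy_cleanup;
		gt.stop_engine    = legacy_stop;
	} else {
		gt.init_engines   = execlists_init;
		gt.cleanup_engine = execlists_cleanup;
		gt.stop_engine    = execlists_stop;
	}

	/* Callers never care which backend was picked. */
	gt.init_engines();
	gt.stop_engine(&rcs);
	gt.cleanup_engine(&rcs);
	return 0;
}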
@@ -4980,7 +4980,7 @@ int i915_gem_init(struct drm_device *dev)
 if (ret)
 goto out_unlock;
 
-ret = dev_priv->gt.init_rings(dev);
+ret = dev_priv->gt.init_engines(dev);
 if (ret)
 goto out_unlock;
 

@@ -5003,14 +5003,14 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_engine_cs *engine;
 int i;
 
 for_each_engine(engine, dev_priv, i)
-dev_priv->gt.cleanup_ring(engine);
+dev_priv->gt.cleanup_engine(engine);
 
 if (i915.enable_execlists)
 /*

@@ -45,7 +45,7 @@ i915_verify_lists(struct drm_device *dev)
 
 for_each_engine(engine, dev_priv, i) {
 list_for_each_entry(obj, &engine->active_list,
-ring_list[engine->id]) {
+engine_list[engine->id]) {
 if (obj->base.dev != dev ||
 !atomic_read(&obj->base.refcount.refcount)) {
 DRM_ERROR("%s: freed active obj %p\n",

@@ -1369,7 +1369,7 @@ eb_get_batch(struct eb_vmas *eb)
 
 #define I915_USER_RINGS (4)
 
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 [I915_EXEC_DEFAULT] = RCS,
 [I915_EXEC_RENDER] = RCS,
 [I915_EXEC_BLT] = BCS,

@@ -1417,7 +1417,7 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
 }
 
-if (!intel_ring_initialized(*ring)) {
+if (!intel_engine_initialized(*ring)) {
 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
 return -EINVAL;
 }

@@ -996,7 +996,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-if (!intel_ring_initialized(engine))
+if (!intel_engine_initialized(engine))
 return;
 
 trace_i915_gem_request_notify(engine);
@@ -795,7 +795,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 intel_logical_ring_emit(ringbuf, MI_NOOP);
 intel_logical_ring_advance(ringbuf);
 
-if (intel_ring_stopped(engine))
+if (intel_engine_stopped(engine))
 return 0;
 
 if (engine->last_context != request->ctx) {

@@ -1054,7 +1054,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 struct drm_i915_private *dev_priv = engine->dev->dev_private;
 int ret;
 
-if (!intel_ring_initialized(engine))
+if (!intel_engine_initialized(engine))
 return;
 
 ret = intel_engine_idle(engine);

@@ -2012,7 +2012,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv;
 
-if (!intel_ring_initialized(engine))
+if (!intel_engine_initialized(engine))
 return;
 
 dev_priv = engine->dev->dev_private;

@@ -2240,7 +2240,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 * @dev: DRM device.
 *
 * This function inits the engines for an Execlists submission style (the equivalent in the
-* legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+* legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
 * those engines that are present in the hardware.
 *
 * Return: non-zero if the initialization failed.

@@ -159,7 +159,7 @@ static bool get_mocs_settings(struct drm_device *dev,
 return result;
 }
 
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
 {
 switch (ring) {
 case RCS:

@@ -191,7 +191,7 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
 */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 const struct drm_i915_mocs_table *table,
-enum intel_ring_id ring)
+enum intel_engine_id ring)
 {
 struct intel_ringbuffer *ringbuf = req->ringbuf;
 unsigned int index;

@@ -325,7 +325,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 if (get_mocs_settings(req->engine->dev, &t)) {
 struct drm_i915_private *dev_priv = req->i915;
 struct intel_engine_cs *engine;
-enum intel_ring_id ring_id;
+enum intel_engine_id ring_id;
 
 /* Program the control registers */
 for_each_engine(engine, dev_priv, ring_id) {
@@ -59,7 +59,7 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 return ringbuf->space;
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *engine)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv = engine->dev->dev_private;
 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);

@@ -69,7 +69,7 @@ static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
 struct intel_ringbuffer *ringbuf = engine->buffer;
 ringbuf->tail &= ringbuf->size - 1;
-if (intel_ring_stopped(engine))
+if (intel_engine_stopped(engine))
 return;
 engine->write_tail(engine, ringbuf->tail);
 }

@@ -2274,21 +2274,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 return 0;
 
 error:
-intel_cleanup_ring_buffer(engine);
+intel_cleanup_engine(engine);
 return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *engine)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *dev_priv;
 
-if (!intel_ring_initialized(engine))
+if (!intel_engine_initialized(engine))
 return;
 
 dev_priv = to_i915(engine->dev);
 
 if (engine->buffer) {
-intel_stop_ring_buffer(engine);
+intel_stop_engine(engine);
 WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 intel_unpin_ringbuffer_obj(engine->buffer);

@@ -3163,11 +3163,11 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 }
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *engine)
+intel_stop_engine(struct intel_engine_cs *engine)
 {
 int ret;
 
-if (!intel_ring_initialized(engine))
+if (!intel_engine_initialized(engine))
 return;
 
 ret = intel_engine_idle(engine);

@@ -146,7 +146,7 @@ struct i915_ctx_workarounds {
 
 struct intel_engine_cs {
 const char *name;
-enum intel_ring_id {
+enum intel_engine_id {
 RCS = 0,
 BCS,
 VCS,

@@ -355,7 +355,7 @@ struct intel_engine_cs {
 };
 
 static inline bool
-intel_ring_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(struct intel_engine_cs *engine)
 {
 return engine->dev != NULL;
 }

@@ -438,8 +438,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
-void intel_stop_ring_buffer(struct intel_engine_cs *engine);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *engine);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 

@@ -465,7 +465,7 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *engine);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);