drm/i915: Move common request allocation code into a common function

The request allocation code is largely duplicated between legacy mode and
execlist mode. The actual difference between the two versions of the code is
pretty minimal.

This patch moves the common code out into a separate function,
i915_gem_request_alloc(). This is called from the execution-specific paths
and in turn invokes a small mode-specific helper to set up the one value
that differs between the two modes; a simplified sketch of the resulting
call flow follows the commit metadata below.

For: VIZ-5190
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
John Harrison 2015-03-19 12:30:08 +00:00, committed by Daniel Vetter
Parent bc0dce3fd0
Commit 6689cb2b62
6 changed files with 54 additions and 56 deletions
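To make the shape of the refactor easier to follow than the per-file hunks
below, here is a small self-contained C sketch of the resulting call flow:
one common allocator does the shared work, then hands the request to a
mode-specific "extras" hook. All names and types in the sketch
(sketch_request, sketch_request_alloc(), the *_alloc_request_extras()
stand-ins) are illustrative only; they are not the i915 symbols, which
appear in the diff itself.

/* Simplified, standalone sketch of the refactor: the common allocator owns
 * allocation and shared setup, then delegates the single per-mode
 * difference to a small hook, mirroring the structure of
 * i915_gem_request_alloc() -> *_alloc_request_extras() in the patch.
 * All identifiers here are illustrative stand-ins, not real i915 symbols.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct sketch_request {
	unsigned int seqno;	/* shared: assigned by the common path */
	const char *ringbuf;	/* mode-specific: set by the "extras" hook */
};

/* Legacy-mode extras: only needs to pick the engine's own ring buffer. */
static int legacy_alloc_request_extras(struct sketch_request *req)
{
	req->ringbuf = "engine ring buffer";
	return 0;
}

/* Execlist-mode extras: picks the per-context ring buffer instead. */
static int execlist_alloc_request_extras(struct sketch_request *req)
{
	req->ringbuf = "per-context ring buffer";
	return 0;
}

/* Common allocator: everything that used to be duplicated lives here. */
static int sketch_request_alloc(bool enable_execlists,
				struct sketch_request **out)
{
	static unsigned int next_seqno = 1;
	struct sketch_request *req;
	int ret;

	req = calloc(1, sizeof(*req));
	if (!req)
		return -1;

	req->seqno = next_seqno++;	/* shared setup */

	/* The only per-mode difference is delegated to a small hook. */
	ret = enable_execlists ? execlist_alloc_request_extras(req)
			       : legacy_alloc_request_extras(req);
	if (ret) {
		free(req);
		return ret;
	}

	*out = req;
	return 0;
}

int main(void)
{
	struct sketch_request *req;

	if (sketch_request_alloc(true, &req) == 0) {
		printf("seqno %u uses %s\n", req->seqno, req->ringbuf);
		free(req);
	}
	return 0;
}

The kernel patch follows the same split: i915_gem_request_alloc() owns the
shared allocation, seqno assignment and error handling, while
intel_logical_ring_alloc_request_extras() and
intel_ring_alloc_request_extras() only touch what differs between the two
modes (context pinning and ring-buffer selection).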


@@ -2115,6 +2115,8 @@ struct drm_i915_gem_request {
 };
 
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx);
 void i915_gem_request_free(struct kref *req_ref);
 
 static inline uint32_t


@@ -2518,6 +2518,43 @@ void i915_gem_request_free(struct kref *req_ref)
 	kfree(req);
 }
 
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx)
+{
+	int ret;
+	struct drm_i915_gem_request *request;
+	struct drm_i915_private *dev_private = ring->dev->dev_private;
+
+	if (ring->outstanding_lazy_request)
+		return 0;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
+
+	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	kref_init(&request->ref);
+	request->ring = ring;
+	request->uniq = dev_private->request_uniq++;
+
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_alloc_request_extras(request, ctx);
+	else
+		ret = intel_ring_alloc_request_extras(request);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	ring->outstanding_lazy_request = request;
+	return 0;
+}
+
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {


@@ -611,44 +611,21 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
-static int logical_ring_alloc_request(struct intel_engine_cs *ring,
-				      struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
+					    struct intel_context *ctx)
 {
-	struct drm_i915_gem_request *request;
-	struct drm_i915_private *dev_private = ring->dev->dev_private;
 	int ret;
 
-	if (ring->outstanding_lazy_request)
-		return 0;
-
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return -ENOMEM;
-
-	if (ctx != ring->default_context) {
-		ret = intel_lr_context_pin(ring, ctx);
-		if (ret) {
-			kfree(request);
+	if (ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request->ring, ctx);
+		if (ret)
 			return ret;
-		}
 	}
 
-	kref_init(&request->ref);
-	request->ring = ring;
-	request->uniq = dev_private->request_uniq++;
-
-	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
-	if (ret) {
-		intel_lr_context_unpin(ring, ctx);
-		kfree(request);
-		return ret;
-	}
-
-	request->ctx = ctx;
+	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
+	request->ctx = ctx;
 	i915_gem_context_reference(request->ctx);
-	request->ringbuf = ctx->engine[ring->id].ringbuf;
 
-	ring->outstanding_lazy_request = request;
 	return 0;
 }
 
@@ -840,7 +817,7 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = logical_ring_alloc_request(ring, ctx);
+	ret = i915_gem_request_alloc(ring, ctx);
 	if (ret)
 		return ret;


@@ -36,6 +36,8 @@
 #define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
+					    struct intel_context *ctx);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);


@@ -2181,32 +2181,10 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	return i915_wait_request(req);
 }
 
-static int
-intel_ring_alloc_request(struct intel_engine_cs *ring)
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-	int ret;
-	struct drm_i915_gem_request *request;
-	struct drm_i915_private *dev_private = ring->dev->dev_private;
+	request->ringbuf = request->ring->buffer;
 
-	if (ring->outstanding_lazy_request)
-		return 0;
-
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return -ENOMEM;
-
-	kref_init(&request->ref);
-	request->ring = ring;
-	request->ringbuf = ring->buffer;
-	request->uniq = dev_private->request_uniq++;
-
-	ret = i915_gem_get_seqno(ring->dev, &request->seqno);
-	if (ret) {
-		kfree(request);
-		return ret;
-	}
-
-	ring->outstanding_lazy_request = request;
 	return 0;
 }
 
@@ -2247,7 +2225,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 		return ret;
 
 	/* Preallocate the olr before touching the ring */
-	ret = intel_ring_alloc_request(ring);
+	ret = i915_gem_request_alloc(ring, ring->default_context);
 	if (ret)
 		return ret;


@@ -390,6 +390,8 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
 int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
 int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,