drm/i915: Update ring->add_request() to take a request structure
Updated the various ring->add_request() implementations to take a request instead of a ring. This removes their reliance on the OLR to obtain the seqno value that the request should be tagged with.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
Parent
7deb4d3980
Commit
ee044a8863
|
@ -2524,7 +2524,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
|
|||
if (i915.enable_execlists)
|
||||
ret = ring->emit_request(ringbuf, request);
|
||||
else {
|
||||
ret = ring->add_request(ring);
|
||||
ret = ring->add_request(request);
|
||||
|
||||
request->tail = intel_ring_get_tail(ringbuf);
|
||||
}
|
||||
|
|
|
@ -1288,16 +1288,16 @@ static int gen6_signal(struct intel_engine_cs *signaller,
|
|||
|
||||
/**
|
||||
* gen6_add_request - Update the semaphore mailbox registers
|
||||
*
|
||||
* @ring - ring that is adding a request
|
||||
* @seqno - return seqno stuck into the ring
|
||||
*
|
||||
* @request - request to write to the ring
|
||||
*
|
||||
* Update the mailbox registers in the *other* rings with the current seqno.
|
||||
* This acts like a signal in the canonical semaphore.
|
||||
*/
|
||||
static int
|
||||
gen6_add_request(struct intel_engine_cs *ring)
|
||||
gen6_add_request(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
if (ring->semaphore.signal)
|
||||
|
@ -1310,8 +1310,7 @@ gen6_add_request(struct intel_engine_cs *ring)
|
|||
|
||||
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, MI_USER_INTERRUPT);
|
||||
__intel_ring_advance(ring);
|
||||
|
||||
|
@ -1408,8 +1407,9 @@ do { \
|
|||
} while (0)
|
||||
|
||||
static int
|
||||
pc_render_add_request(struct intel_engine_cs *ring)
|
||||
pc_render_add_request(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
|
||||
int ret;
|
||||
|
||||
|
@ -1429,8 +1429,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
|
|||
PIPE_CONTROL_WRITE_FLUSH |
|
||||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
|
||||
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, 0);
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
|
||||
|
@ -1449,8 +1448,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
|
|||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
|
||||
PIPE_CONTROL_NOTIFY);
|
||||
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, 0);
|
||||
__intel_ring_advance(ring);
|
||||
|
||||
|
@ -1619,8 +1617,9 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
|
|||
}
|
||||
|
||||
static int
|
||||
i9xx_add_request(struct intel_engine_cs *ring)
|
||||
i9xx_add_request(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
|
@ -1629,8 +1628,7 @@ i9xx_add_request(struct intel_engine_cs *ring)
|
|||
|
||||
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_request_get_seqno(ring->outstanding_lazy_request));
|
||||
intel_ring_emit(ring, i915_gem_request_get_seqno(req));
|
||||
intel_ring_emit(ring, MI_USER_INTERRUPT);
|
||||
__intel_ring_advance(ring);
|
||||
|
||||
|
|
|
@ -183,7 +183,7 @@ struct intel_engine_cs {
|
|||
int __must_check (*flush)(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains);
|
||||
int (*add_request)(struct intel_engine_cs *ring);
|
||||
int (*add_request)(struct drm_i915_gem_request *req);
|
||||
/* Some chipsets are not quite as coherent as advertised and need
|
||||
* an expensive kick to force a true read of the up-to-date seqno.
|
||||
* However, the up-to-date seqno is not always required and the last
|
||||
|
|
Loading…
Reference in new issue