drm/i915: Support asynchronous waits on struct fence from i915_gem_request
We will need to wait on DMA completion (as signaled via struct fence)
before executing our i915_gem_request. Therefore we want to expose a
method for adding the await on the fence itself to the request.

v2: Add a comment detailing a failure to handle a signal-on-any
fence-array.
v3: Pretend that magic numbers don't exist.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-1-chris@chris-wilson.co.uk
Parent: fc0990903c
Commit: b52992c06c
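For context, a minimal sketch (not part of this patch) of how a submission path could use the new helper to make a request wait asynchronously on an external fence, e.g. one taken from a sync_file. The wrapper function and its variable names below are hypothetical; only i915_gem_request_await_dma_fence() comes from this patch.

/* Illustrative sketch only, not part of this patch. */
#include <linux/dma-fence.h>

#include "i915_gem_request.h"

static int example_await_external_fence(struct drm_i915_gem_request *req,
					struct dma_fence *in_fence)
{
	int err;

	/* Queue the asynchronous wait: the request is held back from
	 * hardware until in_fence (or every child of a signal-on-all
	 * fence-array) has signaled, without blocking the caller.
	 */
	err = i915_gem_request_await_dma_fence(req, in_fence);
	if (err < 0)
		return err;

	return 0;
}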
@@ -1423,6 +1423,9 @@ struct i915_error_state_file_priv {
 	struct drm_i915_error_state *error;
 };
 
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -114,7 +114,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 	 */
 	ret = wait_event_interruptible_timeout(error->reset_queue,
 					       !i915_reset_in_progress(error),
-					       10*HZ);
+					       I915_RESET_TIMEOUT);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
 		return -EIO;
@@ -1134,7 +1134,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		if (resv) {
 			ret = i915_sw_fence_await_reservation
 				(&req->submit, resv, &i915_fence_ops,
-				 obj->base.pending_write_domain, 10*HZ,
+				 obj->base.pending_write_domain,
+				 I915_FENCE_TIMEOUT,
 				 GFP_KERNEL | __GFP_NOWARN);
 			if (ret < 0)
 				return ret;
@@ -23,6 +23,7 @@
  */
 
 #include <linux/prefetch.h>
+#include <linux/dma-fence-array.h>
 
 #include "i915_drv.h"
 
@@ -496,6 +497,53 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 	return 0;
 }
 
+int
+i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+				 struct dma_fence *fence)
+{
+	struct dma_fence_array *array;
+	int ret;
+	int i;
+
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return 0;
+
+	if (dma_fence_is_i915(fence))
+		return i915_gem_request_await_request(req, to_request(fence));
+
+	if (!dma_fence_is_array(fence)) {
+		ret = i915_sw_fence_await_dma_fence(&req->submit,
+						    fence, I915_FENCE_TIMEOUT,
+						    GFP_KERNEL);
+		return ret < 0 ? ret : 0;
+	}
+
+	/* Note that if the fence-array was created in signal-on-any mode,
+	 * we should *not* decompose it into its individual fences. However,
+	 * we don't currently store which mode the fence-array is operating
+	 * in. Fortunately, the only user of signal-on-any is private to
+	 * amdgpu and we should not see any incoming fence-array from
+	 * sync-file being in signal-on-any mode.
+	 */
+
+	array = to_dma_fence_array(fence);
+	for (i = 0; i < array->num_fences; i++) {
+		struct dma_fence *child = array->fences[i];
+
+		if (dma_fence_is_i915(child))
+			ret = i915_gem_request_await_request(req,
+							     to_request(child));
+		else
+			ret = i915_sw_fence_await_dma_fence(&req->submit,
+							    child, I915_FENCE_TIMEOUT,
+							    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * i915_gem_request_await_object - set this request to (async) wait upon a bo
  *
@@ -147,7 +147,7 @@ struct drm_i915_gem_request {
 
 extern const struct dma_fence_ops i915_fence_ops;
 
-static inline bool fence_is_i915(struct dma_fence *fence)
+static inline bool dma_fence_is_i915(const struct dma_fence *fence)
 {
 	return fence->ops == &i915_fence_ops;
 }
@@ -176,7 +176,7 @@ to_request(struct dma_fence *fence)
 {
 	/* We assume that NULL fence/request are interoperable */
 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
-	GEM_BUG_ON(fence && !fence_is_i915(fence));
+	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
 	return container_of(fence, struct drm_i915_gem_request, fence);
 }
 
@@ -214,6 +214,8 @@ int
 i915_gem_request_await_object(struct drm_i915_gem_request *to,
 			      struct drm_i915_gem_object *obj,
 			      bool write);
+int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
+				     struct dma_fence *fence);
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \