drm/i915: Refactor shmem pread setup
The command parser is going to need the same synchronization and setup
logic, so factor it out for reuse.

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent: 922044c9df
Commit: 4c914c0c7c
drivers/gpu/drm/i915/i915_drv.h

@@ -2140,6 +2140,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
drivers/gpu/drm/i915/i915_gem.c

@@ -327,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
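For context, here is a minimal sketch of the calling contract the new helper
establishes, in the shape a second user such as the command parser might
follow. This is illustrative only and not part of the commit:
do_parse_range() is a hypothetical stand-in for the caller's per-page work.

static int example_shmem_read(struct drm_i915_gem_object *obj,
			      unsigned long offset, unsigned long length)
{
	int needs_clflush = 0;
	int ret;

	/* Pins the backing pages and waits for outstanding GPU writes;
	 * fails with -EINVAL if the object is not shmem-backed (the v2
	 * obj->base.filp check). */
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* Walk the backing pages; when needs_clflush is non-zero, flush
	 * each mapped range (e.g. with drm_clflush_virt_range()) before
	 * the CPU reads it. do_parse_range() is hypothetical. */
	ret = do_parse_range(obj, offset, length, needs_clflush);

	/* Callers must balance the pin taken by the prepare helper. */
	i915_gem_object_unpin_pages(obj);

	return ret;
}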
@@ -424,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		/* If we're not in the cpu read domain, set ourself into the gtt
-		 * read domain and manually flush cachelines (if required). This
-		 * optimizes for the case when the gpu will dirty the data
-		 * anyway again before the next pread happens. */
-		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
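Note that the pin taken inside i915_gem_obj_prepare_shmem_read() still has to
be balanced: i915_gem_shmem_pread()'s existing exit path (not shown in this
hunk) keeps its i915_gem_object_unpin_pages() call, and any new caller of the
helper is responsible for the matching unpin as well.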