drm/i915: Choose not to evict faultable objects from the GGTT

Often we do not want to evict mapped objects from the GGTT, as these are
quite expensive to tear down and are frequently reused (incurring an
equally, if not more, expensive setup). In particular, when faulting in
a new object we want to avoid evicting an active object, or else we may
trigger a page-fault-of-doom as we ping-pong between evicting two
objects.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-26-chris@chris-wilson.co.uk
Author: Chris Wilson
Date:   2016-08-18 17:17:05 +01:00
Parent: 50349247ea
Commit: 821188778b
3 changed files with 16 additions and 4 deletions
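To make the new policy concrete, here is a small standalone sketch (a model only, not the driver code: struct fake_obj, can_evict() and main() are invented for illustration; only the PIN_* bit values and the fault_mappable test mirror the patch). With PIN_NONFAULT set, an object that is itself fault-mapped is never offered up for eviction, which breaks the fault/evict ping-pong described above:

#include <stdbool.h>
#include <stdio.h>

/* Pin-flag bits as used by this patch. */
#define PIN_NONBLOCK	(1u << 0)
#define PIN_MAPPABLE	(1u << 1)
#define PIN_NONFAULT	(1u << 3)

/* Invented stand-in for the few object fields the policy looks at. */
struct fake_obj {
	bool fault_mappable;	/* object currently has a live GTT mmap */
	bool pinned;		/* object cannot be unbound at all */
};

/*
 * Model of the eviction filter: with PIN_NONFAULT set, an object that
 * is itself fault-mapped is never marked free for eviction, so two
 * faulting objects cannot keep evicting each other.
 */
static bool can_evict(const struct fake_obj *obj, unsigned int flags)
{
	if (obj->pinned)
		return false;
	if ((flags & PIN_NONFAULT) && obj->fault_mappable)
		return false;
	return true;
}

int main(void)
{
	/* A fault-time pin request for a large object. */
	unsigned int flags = PIN_MAPPABLE | PIN_NONBLOCK | PIN_NONFAULT;
	struct fake_obj other = { .fault_mappable = true, .pinned = false };

	printf("evict fault-mapped object: %s\n",
	       can_evict(&other, flags) ? "yes" : "no");	/* prints "no" */
	return 0;
}

Note that in the fault handler itself only objects larger than a couple of partial-VMA chunks opt in to PIN_NONBLOCK | PIN_NONFAULT; for smaller objects it is cheaper to clear enough space for the whole object than to churn partial VMAs.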

@@ -1706,6 +1706,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	struct i915_vma *vma;
 	pgoff_t page_offset;
 	unsigned long pfn;
+	unsigned int flags;
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
@@ -1735,9 +1736,16 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
+	/* If the object is smaller than a couple of partial vma, it is
+	 * not worth only creating a single partial vma - we may as well
+	 * clear enough space for the full object.
+	 */
+	flags = PIN_MAPPABLE;
+	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
+		flags |= PIN_NONBLOCK | PIN_NONFAULT;
+
 	/* Now pin it into the GTT as needed */
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-				       PIN_MAPPABLE | PIN_NONBLOCK);
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
 	if (IS_ERR(vma)) {
 		struct i915_ggtt_view view;
 		unsigned int chunk_size;

@@ -47,7 +47,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 }
 
 static bool
-mark_free(struct i915_vma *vma, struct list_head *unwind)
+mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 {
 	if (i915_vma_is_pinned(vma))
 		return false;
@@ -55,6 +55,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 	if (WARN_ON(!list_empty(&vma->exec_list)))
 		return false;
 
+	if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -129,7 +132,7 @@ search_again:
 	phase = phases;
 	do {
 		list_for_each_entry(vma, *phase, vm_link)
-			if (mark_free(vma, &eviction_list))
+			if (mark_free(vma, flags, &eviction_list))
 				goto found;
 	} while (*++phase);

@@ -637,6 +637,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 #define PIN_NONBLOCK		BIT(0)
 #define PIN_MAPPABLE		BIT(1)
 #define PIN_ZONE_4G		BIT(2)
+#define PIN_NONFAULT		BIT(3)
 #define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
 #define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */