drm/i915: Move GEM initialisation from i915_dma.c to i915_gem.c

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Parent: 9797fbfbcf
Commit: 1070a42b6b

--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1062,70 +1062,6 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 	return can_switch;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-	if (i915_enable_ppgtt >= 0)
-		return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-		return false;
-#endif
-
-	return true;
-}
-
-static int i915_load_gem_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
-	int ret;
-
-	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
-	mutex_lock(&dev->struct_mutex);
-	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
-		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	} else {
-		/* Let GEM Manage all of the aperture.
-		 *
-		 * However, leave one page at the end still bound to the scratch
-		 * page.  There are a number of places where the hardware
-		 * apparently prefetches past the end of the object, and we've
-		 * seen multiple hangs with the GPU head pointer stuck in a
-		 * batchbuffer bound at the last page of the aperture.  One page
-		 * should be enough to keep any prefetching inside of the
-		 * aperture.
-		 */
-		i915_gem_init_global_gtt(dev, 0, mappable_size,
-					 gtt_size);
-	}
-
-	ret = i915_gem_init_hw(dev);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret) {
-		i915_gem_cleanup_aliasing_ppgtt(dev);
-		return ret;
-	}
-
-	/* Allow hardware batchbuffers unless told otherwise. */
-	dev_priv->allow_batchbuffer = 1;
-	return 0;
-}
-
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1168,7 +1104,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_modeset_init(dev);
 
-	ret = i915_load_gem_init(dev);
+	ret = i915_gem_init(dev);
 	if (ret)
 		goto cleanup_gem_stolen;
 
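Worth noting in the block removed from i915_dma.c above (and re-added verbatim in i915_gem.c below): intel_enable_ppgtt() follows the kernel's usual tri-state module-parameter idiom, where a negative value means "let the driver decide" and 0/1 are explicit user overrides. A minimal standalone sketch of that idiom; enable_feature and hw_supports_feature() are illustrative names, not part of this commit:

	#include <stdio.h>

	/* Tri-state parameter, mirroring i915_enable_ppgtt:
	 * -1 = auto (driver decides), 0 = force off, 1 = force on. */
	static int enable_feature = -1;   /* set via module_param() in a real driver */

	static int hw_supports_feature(void)
	{
		return 1;                 /* stand-in for the real capability check */
	}

	static int feature_enabled(void)
	{
		if (enable_feature >= 0)  /* explicit user override wins */
			return enable_feature;
		return hw_supports_feature();
	}

	int main(void)
	{
		printf("feature %s\n", feature_enabled() ? "enabled" : "disabled");
		return 0;
	}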
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1294,6 +1294,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
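The new prototype carries __must_check, like its neighbours; in the kernel this expands to GCC's warn_unused_result attribute, so any caller that drops the return value of i915_gem_init() gets a compile-time warning. A small standalone illustration of the mechanism (compile with gcc -Wall; the names here are illustrative, not from i915):

	#define __must_check __attribute__((warn_unused_result))

	static int __must_check init_thing(void)
	{
		return -1;              /* pretend initialisation failed */
	}

	int main(void)
	{
		init_thing();           /* warns: ignoring return value */
		if (init_thing())       /* fine: result is checked */
			return 1;
		return 0;
	}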
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3560,6 +3560,70 @@ cleanup_render_ring:
 	return ret;
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	} else {
+		/* Let GEM Manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page.  There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture.  One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_init_global_gtt(dev, 0, mappable_size,
+					 gtt_size);
+	}
+
+	ret = i915_gem_init_hw(dev);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		return ret;
+	}
+
+	/* Allow hardware batchbuffers unless told otherwise. */
+	dev_priv->allow_batchbuffer = 1;
+	return 0;
+}
+
 void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
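A note on the sizing arithmetic in i915_gem_init() above: GTT sizes are kept as entry counts in dev_priv->mm.gtt, and each entry maps one page, so shifting by PAGE_SHIFT converts entries to bytes; in the aliasing-PPGTT path the page-directory entries are then carved out of the tail of the global GTT before GEM is told how much address space it may manage. A worked standalone sketch with made-up entry counts (only PAGE_SHIFT and the 512-entry page directory of the gen6-era code are taken from the kernel):

	#include <stdio.h>

	#define PAGE_SHIFT            12                /* 4 KiB pages */
	#define PAGE_SIZE             (1UL << PAGE_SHIFT)
	#define I915_PPGTT_PD_ENTRIES 512               /* gen6 aliasing-PPGTT page directory */

	int main(void)
	{
		/* Illustrative entry counts, standing in for dev_priv->mm.gtt. */
		unsigned long gtt_total_entries    = 512 * 1024;  /* 2 GiB of GTT space  */
		unsigned long gtt_mappable_entries = 64 * 1024;   /* 256 MiB CPU-visible */

		unsigned long gtt_size      = gtt_total_entries << PAGE_SHIFT;
		unsigned long mappable_size = gtt_mappable_entries << PAGE_SHIFT;

		/* Aliasing PPGTT steals its page-directory entries from the top of
		 * the global GTT, so shrink the range handed over to GEM. */
		gtt_size -= I915_PPGTT_PD_ENTRIES * PAGE_SIZE;

		printf("GEM manages %lu MiB of GTT, %lu MiB of it mappable\n",
		       gtt_size >> 20, mappable_size >> 20);  /* 2046 MiB, 256 MiB */
		return 0;
	}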