drm/i915: Embed the io-mapping struct inside drm_i915_private
As io_mapping.h now always allocates the struct, we can avoid that
allocation and extra pointer dance by embedding the struct inside
drm_i915_private.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-5-chris@chris-wilson.co.uk

Parent: cafaf14a5d
Commit: f7bbe7883c
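For illustration, a minimal before/after sketch of the pattern this patch
applies, assuming only the io_mapping API from <linux/io-mapping.h>; the
my_dev structures and helpers are hypothetical stand-ins, not i915 code:

	#include <linux/io-mapping.h>
	#include <linux/types.h>

	struct my_dev_old {
		struct io_mapping *mappable;	/* separately allocated */
	};

	struct my_dev_new {
		struct io_mapping mappable;	/* embedded, no extra allocation */
	};

	/* Before: io_mapping_create_wc() allocates the struct io_mapping,
	 * leaving an allocation that can fail and a pointer to chase on
	 * every use.
	 */
	static int my_dev_old_init(struct my_dev_old *dev,
				   resource_size_t base, unsigned long size)
	{
		dev->mappable = io_mapping_create_wc(base, size);
		if (!dev->mappable)
			return -EIO;
		return 0;
	}

	/* After: io_mapping_init_wc() fills in caller-provided storage and
	 * returns NULL on failure, so the embedded struct needs no separate
	 * allocation.
	 */
	static int my_dev_new_init(struct my_dev_new *dev,
				   resource_size_t base, unsigned long size)
	{
		if (!io_mapping_init_wc(&dev->mappable, base, size))
			return -EIO;
		return 0;
	}

Teardown changes symmetrically: io_mapping_free() on the pointer becomes
io_mapping_fini() on the embedded struct, as the i915_ggtt_cleanup_hw hunk
below shows.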
drivers/gpu/drm/i915/i915_gem.c

@@ -891,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		 * and write to user memory which may result into page
 		 * faults, and so we cannot perform this under struct_mutex.
 		 */
-		if (slow_user_access(ggtt->mappable, page_base,
+		if (slow_user_access(&ggtt->mappable, page_base,
 				     page_offset, user_data,
 				     page_length, false)) {
 			ret = -EFAULT;
@@ -1187,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		 * If the object is non-shmem backed, we retry again with the
 		 * path that handles page fault.
 		 */
-		if (fast_user_write(ggtt->mappable, page_base,
+		if (fast_user_write(&ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			hit_slow_path = true;
 			mutex_unlock(&dev->struct_mutex);
-			if (slow_user_access(ggtt->mappable,
+			if (slow_user_access(&ggtt->mappable,
 					     page_base,
 					     page_offset, user_data,
 					     page_length, true)) {
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -474,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -2794,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
 		kfree(ppgtt);
 	}
@@ -2811,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	ggtt->base.cleanup(&ggtt->base);
 
 	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
+	io_mapping_fini(&ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3209,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	if (!HAS_LLC(dev_priv))
 		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
 
-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
-	if (!ggtt->mappable) {
+	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+				dev_priv->ggtt.mappable_base,
+				dev_priv->ggtt.mappable_end)) {
 		ret = -EIO;
 		goto out_gtt_cleanup;
 	}
@@ -3681,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
-		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
 					vma->node.start,
 					vma->node.size);
 		if (ptr == NULL)
drivers/gpu/drm/i915/i915_gem_gtt.h

@@ -439,13 +439,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
 	struct i915_address_space base;
+	struct io_mapping mappable;	/* Mapping to our CPU mappable region */
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
 	/** "Graphics Stolen Memory" holds the global PTEs */
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -729,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 		 * captures what the GPU read.
 		 */
 
-		s = io_mapping_map_atomic_wc(ggtt->mappable,
+		s = io_mapping_map_atomic_wc(&ggtt->mappable,
 					     reloc_offset);
 		memcpy_fromio(d, s, PAGE_SIZE);
 		io_mapping_unmap_atomic(s);
drivers/gpu/drm/i915/intel_overlay.c

@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
 					 overlay->flip_addr,
 					 PAGE_SIZE);
 
@@ -1489,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
 						overlay->flip_addr);
 
 	return regs;
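Since the mapping is now embedded, every caller takes its address. A hedged
usage sketch mirroring the i915_gpu_error.c hunk above; the my_ggtt type,
offset and buf are illustrative stand-ins, not real i915 state:

	#include <linux/io-mapping.h>
	#include <linux/io.h>
	#include <linux/mm.h>

	struct my_ggtt {
		struct io_mapping mappable;	/* embedded, as in struct i915_ggtt */
	};

	/* Copy one page out of the aperture: map atomically, copy, unmap.
	 * Callers now pass &ggtt->mappable where they previously passed the
	 * ggtt->mappable pointer.
	 */
	static void my_copy_page(struct my_ggtt *ggtt, unsigned long offset,
				 void *buf)
	{
		void __iomem *s;

		s = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		memcpy_fromio(buf, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
	}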