drm/i915: Use helpers for drm_mm_node booleans
A subset of 71724f7089 ("drm/mm: Use helpers for drm_mm_node booleans")
in order to prepare drm-intel-next-queued for subsequent patches before
we can backmerge 71724f7089 itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004142226.13711-1-chris@chris-wilson.co.uk
Parent: 261ea7e29e
Commit: b290a78b5c
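The mechanical change throughout is the same: every open-coded read of the drm_mm_node "allocated" boolean is replaced with the drm_mm_node_allocated() helper from include/drm/drm_mm.h. For context, a minimal sketch of the helper as it stands before the backmerge (the authoritative definition lives in drm_mm.h; the backmerged patch changes the underlying representation of the boolean, which is why callers must stop peeking at the member directly):

/* Minimal sketch of the existing helper (see include/drm/drm_mm.h).
 * Before the 71724f7089 backmerge it simply wraps the boolean member;
 * that commit changes the underlying representation, so open-coded
 * node->allocated reads must go away first -- hence this preparatory
 * conversion of the i915 callers.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}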
@@ -968,7 +968,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
 
-		if (cache->node.allocated) {
+		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1061,7 +1061,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	}
 
 	offset = cache->node.start;
-	if (cache->node.allocated) {
+	if (drm_mm_node_allocated(&cache->node)) {
 		ggtt->vm.insert_page(&ggtt->vm,
 				     i915_gem_object_get_dma_address(obj, page),
 				     offset, I915_CACHE_NONE, 0);
@@ -387,7 +387,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
 {
 	struct drm_mm_node *node = &ggtt->uc_fw;
 
-	GEM_BUG_ON(!node->allocated);
+	GEM_BUG_ON(!drm_mm_node_allocated(node));
 	GEM_BUG_ON(upper_32_bits(node->start));
 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
 
@@ -356,7 +356,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_unlock;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -393,7 +393,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		unsigned page_offset = offset_in_page(offset);
 		unsigned page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			ggtt->vm.insert_page(&ggtt->vm,
 					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
 					     node.start, I915_CACHE_NONE, 0);
@@ -415,7 +415,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
@@ -566,7 +566,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
 			goto out_rpm;
-		GEM_BUG_ON(!node.allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(&node));
 	}
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		unsigned int page_offset = offset_in_page(offset);
 		unsigned int page_length = PAGE_SIZE - page_offset;
 		page_length = remain < page_length ? remain : page_length;
-		if (node.allocated) {
+		if (drm_mm_node_allocated(&node)) {
 			/* flush the write before we modify the GGTT */
 			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 			ggtt->vm.insert_page(&ggtt->vm,
@@ -636,7 +636,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-	if (node.allocated) {
+	if (drm_mm_node_allocated(&node)) {
 		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
 		remove_mappable_node(&node);
 	} else {
@@ -299,7 +299,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 			break;
 		}
 
-		GEM_BUG_ON(!node->allocated);
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
 		vma = container_of(node, typeof(*vma), node);
 
 		/* If we are using coloring to insert guard pages between
@@ -795,7 +795,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->node.allocated);
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->fence);
 
 	mutex_lock(&vma->vm->mutex);
@@ -228,7 +228,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-	GEM_BUG_ON(!vma->node.allocated);
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(upper_32_bits(vma->node.start));
 	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
 	return lower_32_bits(vma->node.start);
@@ -390,7 +390,7 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
 static inline bool i915_node_color_differs(const struct drm_mm_node *node,
 					   unsigned long color)
 {
-	return node->allocated && node->color != color;
+	return drm_mm_node_allocated(node) && node->color != color;
 }
 
 /**