drm/i915: drop pointer to drm_gem_object
Luckily the change is quite a bit less invasive than I had feared.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent: 62b8b21515
Commit: a8089e849a
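Every hunk below is the same mechanical change: struct drm_i915_gem_object now embeds its struct drm_gem_object as the base member (added in the parent commit), so the separate obj back-pointer is redundant and each obj_priv->obj becomes &obj_priv->base. As a minimal sketch of why no back-pointer is needed, the stand-alone program below mirrors the embed-and-container pattern; the names my_gem_object, my_i915_gem_object and to_i915_bo() are invented for illustration and are not the kernel's.

/*
 * Illustrative userspace sketch only: embedding the base object makes the
 * "pointer to base" just &wrapper->base, and offsetof() recovers the
 * wrapper from the base, so no stored back-pointer is required.
 */
#include <stddef.h>
#include <stdio.h>

struct my_gem_object {			/* stands in for struct drm_gem_object */
	size_t size;
};

struct my_i915_gem_object {		/* stands in for struct drm_i915_gem_object */
	struct my_gem_object base;	/* embedded, not pointed to */
	int fence_reg;
};

/* container_of() in miniature: recover the wrapper from the embedded base */
#define to_i915_bo(obj) \
	((struct my_i915_gem_object *) \
	 ((char *)(obj) - offsetof(struct my_i915_gem_object, base)))

int main(void)
{
	struct my_i915_gem_object bo = {
		.base = { .size = 4096 },
		.fence_reg = -1,
	};
	struct my_gem_object *obj = &bo.base;	/* was bo.obj before this patch */

	/* Both directions of the conversion work without an extra pointer. */
	printf("size=%zu fence_reg=%d\n", obj->size, to_i915_bo(obj)->fence_reg);
	return 0;
}

Dropping the stored pointer saves a word per object and removes any chance of the two fields disagreeing.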
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	spin_lock(lock);
 	list_for_each_entry(obj_priv, head, list)
 	{
-		struct drm_gem_object *obj = obj_priv->obj;
-
 		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
-			   obj,
+			   &obj_priv->base,
 			   get_pin_flag(obj_priv),
-			   obj->size,
-			   obj->read_domains, obj->write_domain,
+			   obj_priv->base.size,
+			   obj_priv->base.read_domains,
+			   obj_priv->base.write_domain,
 			   obj_priv->last_rendering_seqno,
 			   obj_priv->dirty ? " dirty" : "",
 			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
-		if (obj->name)
-			seq_printf(m, " (name: %d)", obj->name);
+		if (obj_priv->base.name)
+			seq_printf(m, " (name: %d)", obj_priv->base.name);
 		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
 		if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	spin_lock(&dev_priv->mm.active_list_lock);
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
@@ -648,7 +648,6 @@ typedef struct drm_i915_private {
 /** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
-	struct drm_gem_object *obj;
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
@@ -1566,7 +1566,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	list_for_each_entry_safe(obj_priv, next,
 				 &dev_priv->mm.gpu_write_list,
 				 gpu_write_list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if ((obj->write_domain & flush_domains) ==
 		    obj->write_domain) {
@@ -1704,7 +1704,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		obj_priv = list_first_entry(&dev_priv->mm.active_list,
 					    struct drm_i915_gem_object,
 					    list);
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 
 		/* If the seqno being retired doesn't match the oldest in the
 		 * list, then the oldest in the list must still be newer than
@@ -2075,7 +2075,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 
 	/* Try to find the smallest clean object */
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 		if (obj->size >= min_size) {
 			if ((!obj_priv->dirty ||
 			    i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2209,7 +2209,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 
 		/* Find an object that we can immediately reuse */
 		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-			obj = obj_priv->obj;
+			obj = &obj_priv->base;
 			if (obj->size >= min_size)
 				break;
 
@@ -2437,7 +2437,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 	i = I915_FENCE_REG_NONE;
 	list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
 			    fence_list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 
 		if (obj_priv->pin_count)
 			continue;
@@ -4441,7 +4441,6 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 	obj->agp_type = AGP_USER_MEMORY;
 
 	obj->base.driver_private = NULL;
-	obj->obj = &obj->base;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
@@ -4495,9 +4494,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
 		struct drm_gem_object *obj;
 		int ret;
 
-		obj = list_first_entry(&dev_priv->mm.inactive_list,
+		obj = &list_first_entry(&dev_priv->mm.inactive_list,
 				       struct drm_i915_gem_object,
-				       list)->obj;
+				       list)->base;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {
@@ -5111,7 +5110,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
-				i915_gem_object_unbind(obj_priv->obj);
+				i915_gem_object_unbind(&obj_priv->base);
 				if (--nr_to_scan <= 0)
 					break;
 			}
@@ -5140,7 +5139,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 					 &dev_priv->mm.inactive_list,
 					 list) {
 			if (nr_to_scan > 0) {
-				i915_gem_object_unbind(obj_priv->obj);
+				i915_gem_object_unbind(&obj_priv->base);
 				nr_to_scan--;
 			} else
 				cnt++;
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
 	struct drm_i915_gem_object *obj_priv;
 
 	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = obj_priv->obj;
+		obj = &obj_priv->base;
 		if (obj_priv->pin_count || obj_priv->active ||
 		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
 					   I915_GEM_DOMAIN_GTT)))
@@ -613,7 +613,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[1] = NULL;
 	count = 0;
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-		struct drm_gem_object *obj = obj_priv->obj;
+		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
 		    bbaddr >= obj_priv->gtt_offset &&
@@ -649,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	if (error->active_bo) {
 		int i = 0;
 		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
+			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
 			error->active_bo[i].name = obj->name;
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = overlay->vid_bo->obj;
+	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 
 	switch (overlay->hw_wedged) {
 	case RELEASE_OLD_VID:
-		obj = overlay->old_vid_bo->obj;
+		obj = &overlay->old_vid_bo->base;
 		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (ret != 0)
 		return ret;
 
-	obj = overlay->old_vid_bo->obj;
+	obj = &overlay->old_vid_bo->base;
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	overlay->old_vid_bo = NULL;