drm: Add drm_mm_for_each_node_safe()
A complement to drm_mm_for_each_node(), this wraps list_for_each_entry_safe() so the list of nodes can be walked in a way that is safe against removal of entries.

Note from Joonas: "Most of the diff is about __drm_mm_nodes(mm), which could be split into own patch and keep the R-b's." But I don't feel like insisting on the resend.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
[danvet: Add note.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161216074718.32500-4-chris@chris-wilson.co.uk
Parent: 56e3d1cd05
Commit: ad579002c8
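A usage sketch, not part of this patch: the new iterator lets a caller unlink and free nodes while walking the allocator, which drm_mm_for_each_node() cannot do safely because it would read the freed node's list linkage. The wrapper function below and the assumption that each drm_mm_node was kmalloc'ed by the driver are illustrative only, not drm API.

	#include <drm/drm_mm.h>
	#include <linux/slab.h>

	/* Illustrative only: tear down every node still allocated in @mm.
	 * Assumes the driver allocated each drm_mm_node with kmalloc().
	 */
	static void my_driver_release_all(struct drm_mm *mm)
	{
		struct drm_mm_node *node, *next;

		drm_mm_for_each_node_safe(node, next, mm) {
			/* @next was read before this body ran, so removing
			 * and freeing @node does not break the walk.
			 */
			drm_mm_remove_node(node);
			kfree(node);
		}
	}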
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -138,7 +138,7 @@ static void show_leaks(struct drm_mm *mm)
 	if (!buf)
 		return;
 
-	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+	list_for_each_entry(node, __drm_mm_nodes(mm), node_list) {
 		struct stack_trace trace = {
 			.entries = entries,
 			.max_entries = STACKDEPTH
@@ -320,8 +320,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		if (hole->start < end)
 			return -ENOSPC;
 	} else {
-		hole = list_entry(&mm->head_node.node_list,
-				  typeof(*hole), node_list);
+		hole = list_entry(__drm_mm_nodes(mm), typeof(*hole), node_list);
 	}
 
 	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
@@ -884,7 +883,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
  */
 bool drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->head_node.node_list;
+	struct list_head *head = __drm_mm_nodes(mm);
 
 	return (head->next->next == head);
 }
@@ -930,7 +929,7 @@ EXPORT_SYMBOL(drm_mm_init);
  */
 void drm_mm_takedown(struct drm_mm *mm)
 {
-	if (WARN(!list_empty(&mm->head_node.node_list),
+	if (WARN(!list_empty(__drm_mm_nodes(mm)),
 		 "Memory manager not clean during takedown.\n"))
 		show_leaks(mm);
 }
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -179,6 +179,8 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 	return __drm_mm_hole_node_end(hole_node);
 }
 
+#define __drm_mm_nodes(mm) (&(mm)->head_node.node_list)
+
 /**
  * drm_mm_for_each_node - iterator to walk over all allocated nodes
  * @entry: drm_mm_node structure to assign to in each iteration step
@@ -187,9 +189,20 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * This iterator walks over all nodes in the range allocator. It is implemented
  * with list_for_each, so not safe against removal of elements.
  */
-#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
-						&(mm)->head_node.node_list, \
-						node_list)
+#define drm_mm_for_each_node(entry, mm) \
+	list_for_each_entry(entry, __drm_mm_nodes(mm), node_list)
+
+/**
+ * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
+ * @entry: drm_mm_node structure to assign to in each iteration step
+ * @next: drm_mm_node structure to store the next step
+ * @mm: drm_mm allocator to walk
+ *
+ * This iterator walks over all nodes in the range allocator. It is implemented
+ * with list_for_each_safe, so safe against removal of elements.
+ */
+#define drm_mm_for_each_node_safe(entry, next, mm) \
+	list_for_each_entry_safe(entry, next, __drm_mm_nodes(mm), node_list)
 
 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
 	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
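For reference, a rough sketch of what the safe walk boils down to. This is simplified for illustration; the real list_for_each_entry_safe() in <linux/list.h> expresses the same idea as a for loop built on container_of()-based helpers:

	/* Simplified illustration: @next is sampled before the body runs, so the
	 * body may unlink or free @entry without breaking the iteration.
	 */
	entry = list_first_entry(__drm_mm_nodes(mm), struct drm_mm_node, node_list);
	next = list_next_entry(entry, node_list);
	while (&entry->node_list != __drm_mm_nodes(mm)) {
		/* ... body, e.g. drm_mm_remove_node(entry) ... */
		entry = next;
		next = list_next_entry(next, node_list);
	}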