We used to pre-allocate drm_mm nodes and save them in a linked list for
later use, so that spare nodes were always available in atomic contexts.
However, this is inherently racy if multiple threads are in an atomic
context at the same time and there are not enough spare nodes. Moreover,
all remaining users run in process context and just protect the drm_mm
with a spinlock. So we can easily preallocate the node, take the spinlock
and insert the node.
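
A minimal sketch of the resulting pattern (drv, drv->mm and drv->mm_lock
are made-up driver state, not part of this patch):

	struct drm_mm_node *node;
	int ret;

	/* process context: a plain GFP_KERNEL allocation suffices */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* only the actual insertion runs under the driver's spinlock */
	spin_lock(&drv->mm_lock);
	ret = drm_mm_insert_node(&drv->mm, node, size, alignment,
				 DRM_MM_SEARCH_DEFAULT);
	spin_unlock(&drv->mm_lock);
	if (ret)
		kfree(node);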

This may have worked well while the BKL was still in place, but with
today's locking infrastructure it no longer makes any sense. Besides, most
users can easily embed a struct drm_mm_node into their own objects, so no
separate allocation is needed at all.
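
For example, with an embedded node the allocation disappears entirely
(foo_buffer, foo_device and fdev->mm_lock are made-up names used only for
illustration):

	struct foo_buffer {
		struct drm_mm_node node;	/* embedded, not a pointer */
		/* ... driver-private state ... */
	};

	static int foo_buffer_pin(struct foo_device *fdev,
				  struct foo_buffer *buf,
				  unsigned long size, unsigned alignment)
	{
		int ret;

		spin_lock(&fdev->mm_lock);
		ret = drm_mm_insert_node(&fdev->mm, &buf->node, size,
					 alignment, DRM_MM_SEARCH_DEFAULT);
		spin_unlock(&fdev->mm_lock);
		return ret;
	}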

Thus, remove the old pre-alloc API and all the helpers it provides. All
drivers have already been converted, and the old API must no longer be
used in new code.

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
David Herrmann, 2013-07-27 13:39:28 +02:00; committed by Dave Airlie
Parent: 06e78edff1
Commit: c700c67bae
2 changed files with 20 additions and 235 deletions

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
-#define MM_UNUSED_TARGET 4
-
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
-	struct drm_mm_node *child;
-
-	if (atomic)
-		child = kzalloc(sizeof(*child), GFP_ATOMIC);
-	else
-		child = kzalloc(sizeof(*child), GFP_KERNEL);
-
-	if (unlikely(child == NULL)) {
-		spin_lock(&mm->unused_lock);
-		if (list_empty(&mm->unused_nodes))
-			child = NULL;
-		else {
-			child =
-			    list_entry(mm->unused_nodes.next,
-				       struct drm_mm_node, node_list);
-			list_del(&child->node_list);
-			--mm->num_unused;
-		}
-		spin_unlock(&mm->unused_lock);
-	}
-	return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
-	struct drm_mm_node *node;
-
-	spin_lock(&mm->unused_lock);
-	while (mm->num_unused < MM_UNUSED_TARGET) {
-		spin_unlock(&mm->unused_lock);
-		node = kzalloc(sizeof(*node), GFP_KERNEL);
-		spin_lock(&mm->unused_lock);
-
-		if (unlikely(node == NULL)) {
-			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
-			spin_unlock(&mm->unused_lock);
-			return ret;
-		}
-		++mm->num_unused;
-		list_add_tail(&node->node_list, &mm->unused_nodes);
-	}
-	spin_unlock(&mm->unused_lock);
-	return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						unsigned long start,
+						unsigned long end,
+						enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
@@ -187,24 +147,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
-					     unsigned long size,
-					     unsigned alignment,
-					     unsigned long color,
-					     int atomic)
-{
-	struct drm_mm_node *node;
-
-	node = drm_mm_kmalloc(hole_node->mm, atomic);
-	if (unlikely(node == NULL))
-		return NULL;
-
-	drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
-	return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. The preallocated memory node
@@ -279,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end,
-						int atomic)
-{
-	struct drm_mm_node *node;
-
-	node = drm_mm_kmalloc(hole_node->mm, atomic);
-	if (unlikely(node == NULL))
-		return NULL;
-
-	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
-				   start, end);
-
-	return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. This is for range
@@ -359,28 +280,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-	struct drm_mm *mm = node->mm;
-
-	drm_mm_remove_node(node);
-
-	spin_lock(&mm->unused_lock);
-	if (mm->num_unused < MM_UNUSED_TARGET) {
-		list_add(&node->node_list, &mm->unused_nodes);
-		++mm->num_unused;
-	} else
-		kfree(node);
-	spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
 static int check_free_hole(unsigned long start, unsigned long end,
 			   unsigned long size, unsigned alignment)
 {
@@ -396,11 +295,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
 	return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-					       unsigned long size,
-					       unsigned alignment,
-					       unsigned long color,
-					       enum drm_mm_search_flags flags)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+						      unsigned long size,
+						      unsigned alignment,
+						      unsigned long color,
+						      enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
@@ -434,9 +333,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 						unsigned long size,
 						unsigned alignment,
 						unsigned long color,
@@ -481,7 +379,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
 /**
  * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -654,10 +551,7 @@ EXPORT_SYMBOL(drm_mm_clean);
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->hole_stack);
-	INIT_LIST_HEAD(&mm->unused_nodes);
-	mm->num_unused = 0;
 	mm->scanned_blocks = 0;
-	spin_lock_init(&mm->unused_lock);
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -677,22 +571,8 @@ EXPORT_SYMBOL(drm_mm_init);
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct drm_mm_node *entry, *next;
-
-	if (WARN(!list_empty(&mm->head_node.node_list),
-		 "Memory manager not clean. Delaying takedown\n")) {
-		return;
-	}
-
-	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
-		list_del(&entry->node_list);
-		kfree(entry);
-		--mm->num_unused;
-	}
-	spin_unlock(&mm->unused_lock);
-
-	BUG_ON(mm->num_unused != 0);
+	WARN(!list_empty(&mm->head_node.node_list),
+	     "Memory manager not clean during takedown.\n");
 }
 EXPORT_SYMBOL(drm_mm_takedown);

diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -70,9 +70,6 @@ struct drm_mm {
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
-	struct list_head unused_nodes;
-	int num_unused;
-	spinlock_t unused_lock;
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
@@ -123,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
-#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
-	for (entry = (mm)->prev_scanned_node, \
-		next = entry ? list_entry(entry->node_list.next, \
-			struct drm_mm_node, node_list) : NULL; \
-		entry != NULL; entry = next, \
-		next = entry ? list_entry(entry->node_list.next, \
-			struct drm_mm_node, node_list) : NULL) \
 
 /* Note that we need to unroll list_for_each_entry in order to inline
  * setting hole_start and hole_end on each iteration and keep the
@@ -147,52 +137,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Basic range manager support (drm_mm.c)
  */
 extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-						    unsigned long size,
-						    unsigned alignment,
-						    unsigned long color,
-						    int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
-						struct drm_mm_node *node,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end,
-						int atomic);
-
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
-						   unsigned long size,
-						   unsigned alignment)
-{
-	return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-							   unsigned long size,
-							   unsigned alignment)
-{
-	return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-static inline struct drm_mm_node *drm_mm_get_block_range(
-						struct drm_mm_node *parent,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end)
-{
-	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-					      start, end, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
-						struct drm_mm_node *parent,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end)
-{
-	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-					      start, end, 1);
-}
 
 extern int drm_mm_insert_node_generic(struct drm_mm *mm,
 				      struct drm_mm_node *node,
@@ -229,52 +173,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 						   0, start, end, flags);
 }
 
-extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern void drm_mm_remove_node(struct drm_mm_node *node);
 extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						      unsigned long size,
-						      unsigned alignment,
-						      unsigned long color,
-						      enum drm_mm_search_flags flags);
-extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
-						const struct drm_mm *mm,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end,
-						enum drm_mm_search_flags flags);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-						     unsigned long size,
-						     unsigned alignment,
-						     enum drm_mm_search_flags flags)
-{
-	return drm_mm_search_free_generic(mm,size, alignment, 0, flags);
-}
-static inline struct drm_mm_node *drm_mm_search_free_in_range(
-						const struct drm_mm *mm,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end,
-						enum drm_mm_search_flags flags)
-{
-	return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
-						   start, end, flags);
-}
 extern void drm_mm_init(struct drm_mm *mm,
 			unsigned long start,
 			unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
-{
-	return block->mm;
-}
 
 void drm_mm_init_scan(struct drm_mm *mm,
 		      unsigned long size,