mm/slab.c: use list_first_entry_or_null()

Simplify the code with list_first_entry_or_null().
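
For reference, list_first_entry_or_null() is defined in include/linux/list.h roughly as

	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

so a NULL return corresponds exactly to the old "entry == &list" test on an empty list, and the separate list_entry() call is no longer needed.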

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Geliang Tang, 2016-01-14 15:17:56 -08:00, committed by Linus Torvalds
Parent: 2bd03e49d6
Commit: d8ad47d83f
1 changed file with 12 additions and 12 deletions

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2791,18 +2791,18 @@ retry:
 	}
 
 	while (batchcount > 0) {
-		struct list_head *entry;
 		struct page *page;
 		/* Get slab alloc is to come from. */
-		entry = n->slabs_partial.next;
-		if (entry == &n->slabs_partial) {
+		page = list_first_entry_or_null(&n->slabs_partial,
+				struct page, lru);
+		if (!page) {
 			n->free_touched = 1;
-			entry = n->slabs_free.next;
-			if (entry == &n->slabs_free)
+			page = list_first_entry_or_null(&n->slabs_free,
+					struct page, lru);
+			if (!page)
 				goto must_grow;
 		}
 
-		page = list_entry(entry, struct page, lru);
 		check_spinlock_acquired(cachep);
 
 		/*
@@ -3085,7 +3085,6 @@ retry:
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
-	struct list_head *entry;
 	struct page *page;
 	struct kmem_cache_node *n;
 	void *obj;
@@ -3098,15 +3097,16 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 retry:
 	check_irq_off();
 	spin_lock(&n->list_lock);
-	entry = n->slabs_partial.next;
-	if (entry == &n->slabs_partial) {
+	page = list_first_entry_or_null(&n->slabs_partial,
+			struct page, lru);
+	if (!page) {
 		n->free_touched = 1;
-		entry = n->slabs_free.next;
-		if (entry == &n->slabs_free)
+		page = list_first_entry_or_null(&n->slabs_free,
+				struct page, lru);
+		if (!page)
 			goto must_grow;
 	}
 
-	page = list_entry(entry, struct page, lru);
 	check_spinlock_acquired_node(cachep, nodeid);
 
 	STATS_INC_NODEALLOCS(cachep);
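
As a sanity check of the pattern (not part of the patch), here is a minimal userspace sketch. The list helpers below are re-created just for illustration, and struct fake_page and first_old_style() are stand-ins invented for this example; only the behaviour they demonstrate, that the open-coded "peek at ->next and compare against the head" idiom and list_first_entry_or_null() agree on both empty and non-empty lists, reflects the change above.

/*
 * Userspace illustration only: a minimal re-creation of the kernel's
 * list_head machinery, just enough to compare the old open-coded test
 * with list_first_entry_or_null().
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

#define list_empty(head) ((head)->next == (head))

#define list_first_entry_or_null(ptr, type, member) \
	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

/* Stand-in for struct page: only the lru linkage matters here. */
struct fake_page {
	int id;
	struct list_head lru;
};

/* Old style: peek at ->next and compare against the list head. */
static struct fake_page *first_old_style(struct list_head *head)
{
	struct list_head *entry = head->next;

	if (entry == head)
		return NULL;
	return list_entry(entry, struct fake_page, lru);
}

int main(void)
{
	struct list_head slabs_partial = LIST_HEAD_INIT(slabs_partial);
	struct fake_page page = { .id = 42 };
	struct fake_page *a, *b;

	/* Empty list: both styles report "nothing available". */
	a = first_old_style(&slabs_partial);
	b = list_first_entry_or_null(&slabs_partial, struct fake_page, lru);
	printf("empty:     old=%p new=%p\n", (void *)a, (void *)b);

	/* One page queued: both styles return the same entry. */
	list_add_tail(&page.lru, &slabs_partial);
	a = first_old_style(&slabs_partial);
	b = list_first_entry_or_null(&slabs_partial, struct fake_page, lru);
	printf("non-empty: old id=%d new id=%d\n", a->id, b->id);

	return 0;
}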