Merge branch 'slab/next' into slab/for-linus

Pekka Enberg committed 2012-10-03 09:56:12 +03:00
Parents: a0d271cbfe 608da7e3fc
Commit: 023dc70470
8 changed files with 187 additions and 163 deletions

View file

@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
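
With the SLOB+tracing case added to this #if branch, kmalloc_track_caller() resolves to __kmalloc_track_caller() on all three allocators, so an allocation made inside a helper is attributed to the helper's caller rather than to the helper itself. A minimal sketch of the intended use (my_strdup is a hypothetical helper, modelled on kstrdup):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Allocations are attributed to my_strdup()'s caller in the
	 * allocation records, because kmalloc_track_caller() forwards
	 * _RET_IP_ -- the helper itself stays out of the trace. */
	static char *my_strdup(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;
		char *buf = kmalloc_track_caller(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}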

View file

@@ -45,7 +45,6 @@ struct kmem_cache {
 	unsigned int colour_off;	/* colour offset */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
-	unsigned int dflags;		/* dynamic flags */
 
 	/* constructor func */
 	void (*ctor)(void *obj);
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size,
-				    struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
 #else
 static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return 0;
-}
 #endif
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
@@ -154,7 +147,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_trace(size, cachep, flags);
+	ret = kmem_cache_alloc_trace(cachep, flags, size);
 
 	return ret;
 }

View file

@@ -1,12 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#include <linux/numa.h>
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
 					      gfp_t flags)
 {
-	return kmem_cache_alloc_node(cachep, flags, -1);
+	return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	return __kmalloc_node(size, flags, -1);
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
 }
 
 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
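
The bare -1 node id gives way to NUMA_NO_NODE, which <linux/numa.h> defines as (-1); the header now includes that explicitly rather than relying on an indirect include. A hedged sketch of a caller using the same sentinel (alloc_near is a hypothetical helper):

	#include <linux/numa.h>
	#include <linux/slab.h>

	/* Allocate near a known node, or pass the no-preference
	 * sentinel (numerically still -1) and let the allocator pick. */
	static void *alloc_near(struct kmem_cache *cache, gfp_t gfp, int nid)
	{
		if (nid < 0)
			nid = NUMA_NO_NODE;
		return kmem_cache_alloc_node(cache, gfp, nid);
	}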

View file

@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-	return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-	page = compound_head(page);
-	BUG_ON(!PageSlab(page));
-	return page->slab_cache;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
@@ -818,6 +803,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
 	dump_stack();
+	add_taint(TAINT_BAD_PAGE);
 }
 
 /*
@@ -1781,9 +1767,6 @@ void __init kmem_cache_init_late(void)
 
 	slab_state = UP;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(cachep, &slab_caches, list)
@@ -1791,6 +1774,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&slab_mutex);
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	slab_state = FULL;
 
@@ -2506,8 +2492,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+	    && cachep->object_size > cache_line_size()
+	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
 #endif
@@ -3098,7 +3085,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 }
 
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-				   void *caller)
+				   unsigned long caller)
 {
 	struct page *page;
 	unsigned int objnr;
@@ -3118,7 +3105,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	objnr = obj_to_index(cachep, slabp, objp);
 
@@ -3131,7 +3118,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, (unsigned long)caller);
+			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
 		} else {
@@ -3285,7 +3272,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-				gfp_t flags, void *objp, void *caller)
+				gfp_t flags, void *objp, unsigned long caller)
 {
 	if (!objp)
 		return objp;
@@ -3302,7 +3289,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+		*dbg_userword(cachep, objp) = (void *)caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
@@ -3576,8 +3563,8 @@ done:
  * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-		   void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+		unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -3663,7 +3650,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 #endif /* CONFIG_NUMA */
 
 static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -3799,7 +3786,7 @@ free_done:
  * be in this state _before_ it is released.  Called with disabled ints.
  */
 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-				void *caller)
+				unsigned long caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
@@ -3839,7 +3826,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
@@ -3850,14 +3837,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
 void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+	ret = slab_alloc(cachep, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+		      size, cachep->size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3866,8 +3853,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *ret = __cache_alloc_node(cachep, flags, nodeid,
-				       __builtin_return_address(0));
+	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
@@ -3878,17 +3864,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-				  struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
-				  int nodeid)
+				  int nodeid,
+				  size_t size)
 {
 	void *ret;
 
-	ret = __cache_alloc_node(cachep, flags, nodeid,
-				 __builtin_return_address(0));
+	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret,
-			   size, slab_buffer_size(cachep),
+			   size, cachep->size,
 			   flags, nodeid);
 	return ret;
 }
@@ -3896,34 +3882,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node,
-			__builtin_return_address(0));
+	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, (void *)caller);
+	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, NULL);
+	return __do_kmalloc_node(size, flags, node, 0);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
@@ -3936,7 +3921,7 @@ EXPORT_SYMBOL(__kmalloc_node);
  * @caller: function caller for debug tracking of the caller
  */
 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-					  void *caller)
+					  unsigned long caller)
 {
 	struct kmem_cache *cachep;
 	void *ret;
@@ -3949,9 +3934,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = __cache_alloc(cachep, flags, caller);
+	ret = slab_alloc(cachep, flags, caller);
 
-	trace_kmalloc((unsigned long) caller, ret,
+	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
 	return ret;
@@ -3961,20 +3946,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, __builtin_return_address(0));
+	return __do_kmalloc(size, flags, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, (void *)caller);
+	return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
 #else
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc(size, flags, NULL);
+	return __do_kmalloc(size, flags, 0);
 }
 EXPORT_SYMBOL(__kmalloc);
 #endif
@@ -3995,7 +3980,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, __builtin_return_address(0));
+	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -4026,7 +4011,7 @@ void kfree(const void *objp)
 	debug_check_no_locks_freed(objp, c->object_size);
 	debug_check_no_obj_freed(objp, c->object_size);
-	__cache_free(c, (void *)objp, __builtin_return_address(0));
+	__cache_free(c, (void *)objp, _RET_IP_);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
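
The bulk of the churn above swaps void *caller for unsigned long caller and __builtin_return_address(0) for _RET_IP_, which <linux/kernel.h> defines as (unsigned long)__builtin_return_address(0); only the SLAB_STORE_USER debug stores cast back to void *. The idiom, reduced to a hedged sketch (traced_alloc and my_alloc are hypothetical):

	#include <linux/kernel.h>	/* _RET_IP_ */
	#include <linux/slab.h>

	/* Carry the call site as a plain integer and symbolize it only
	 * when printing, mirroring the slab_alloc()/caller convention. */
	static void *traced_alloc(struct kmem_cache *cache, gfp_t gfp,
				  unsigned long caller)
	{
		void *obj = kmem_cache_alloc(cache, gfp);

		pr_debug("alloc %p from %pS\n", obj, (void *)caller);
		return obj;
	}

	void *my_alloc(struct kmem_cache *cache, gfp_t gfp)
	{
		return traced_alloc(cache, gfp, _RET_IP_);
	}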

View file

@@ -23,6 +23,52 @@ enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 
+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	struct kmem_cache *s = NULL;
+
+	if (!name || in_interrupt() || size < sizeof(void *) ||
+		size > KMALLOC_MAX_SIZE) {
+		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(s, &slab_caches, list) {
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module.  Print a warning.
+		 */
+		res = probe_kernel_address(s->name, tmp);
+		if (res) {
+			pr_err("Slab cache with size %d has lost its name\n",
+			       s->object_size);
+			continue;
+		}
+
+		if (!strcmp(s->name, name)) {
+			pr_err("%s (%s): Cache name already exists.\n",
+			       __func__, name);
+			dump_stack();
+			s = NULL;
+			return -EINVAL;
+		}
+	}
+
+	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
+	return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+	return 0;
+}
+#endif
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -53,60 +99,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
 	struct kmem_cache *s = NULL;
 
-#ifdef CONFIG_DEBUG_VM
-	if (!name || in_interrupt() || size < sizeof(void *) ||
-		size > KMALLOC_MAX_SIZE) {
-		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-			" failed\n", name);
-		goto out;
-	}
-#endif
-
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
-
-#ifdef CONFIG_DEBUG_VM
-	list_for_each_entry(s, &slab_caches, list) {
-		char tmp;
-		int res;
-
-		/*
-		 * This happens when the module gets unloaded and doesn't
-		 * destroy its slab cache and no-one else reuses the vmalloc
-		 * area of the module.  Print a warning.
-		 */
-		res = probe_kernel_address(s->name, tmp);
-		if (res) {
-			printk(KERN_ERR
-			       "Slab cache with size %d has lost its name\n",
-			       s->object_size);
-			continue;
-		}
-
-		if (!strcmp(s->name, name)) {
-			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-				" already exists.\n",
-				name);
-			dump_stack();
-			s = NULL;
-			goto oops;
-		}
-	}
-
-	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-#endif
-
-	s = __kmem_cache_create(name, size, align, flags, ctor);
-
-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
+	if (kmem_cache_sanity_check(name, size) == 0)
+		s = __kmem_cache_create(name, size, align, flags, ctor);
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
 	if (!s && (flags & SLAB_PANIC))
 		panic("kmem_cache_create: Failed to create slab '%s'\n", name);

View file

@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	void *page;
 
 #ifdef CONFIG_NUMA
-	if (node != -1)
+	if (node != NUMA_NO_NODE)
 		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -425,7 +425,8 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page->private = size;
 		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
 EXPORT_SYMBOL(__kmalloc_node);
 
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+				  int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
 void kfree(const void *block)
 {
 	struct page *sp;
@@ -514,7 +535,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *c;
 
 	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
 
 	if (c) {
 		c->name = name;

View file

@@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
 	printk(KERN_ERR "----------------------------------------"
 			"-------------------------------------\n\n");
+
+	add_taint(TAINT_BAD_PAGE);
 }
 
 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
@@ -1069,13 +1071,13 @@ bad:
 	return 0;
 }
 
-static noinline int free_debug_processing(struct kmem_cache *s,
-		 struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags)
 {
-	unsigned long flags;
-	int rc = 0;
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1113,15 +1115,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
 	init_object(s, object, SLUB_RED_INACTIVE);
-	rc = 1;
 out:
 	slab_unlock(page);
-	local_irq_restore(flags);
-	return rc;
+	/*
+	 * Keep node_lock to preserve integrity
+	 * until the object is actually freed
+	 */
+	return n;
 
 fail:
+	slab_unlock(page);
+	spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
-	goto out;
+	return NULL;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1214,8 +1220,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
-static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1714,7 +1721,7 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-void init_kmem_cache_cpus(struct kmem_cache *s)
+static void init_kmem_cache_cpus(struct kmem_cache *s)
 {
 	int cpu;
 
@@ -1939,7 +1946,7 @@ static void unfreeze_partials(struct kmem_cache *s)
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1962,6 +1969,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				local_irq_save(flags);
 				unfreeze_partials(s);
 				local_irq_restore(flags);
+				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
 				stat(s, CPU_PARTIAL_DRAIN);
@@ -2310,7 +2318,7 @@ new_slab:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
@@ -2380,9 +2388,15 @@ redo:
 	return object;
 }
 
+static __always_inline void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, unsigned long addr)
+{
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
@@ -2393,7 +2407,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	return ret;
 }
@@ -2411,7 +2425,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2425,7 +2439,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -2457,7 +2471,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+	if (kmem_cache_debug(s) &&
+		!(n = free_debug_processing(s, page, x, addr, &flags)))
 		return;
 
 	do {
@@ -3362,7 +3377,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3405,7 +3420,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -3482,7 +3497,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kmemleak_free(x);
-		put_page(page);
+		__free_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
@@ -4033,7 +4048,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+	ret = slab_alloc(s, gfpflags, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4063,7 +4078,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
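
SLUB now matches SLAB's shape: slab_alloc_node() carries the node argument and slab_alloc() is an __always_inline wrapper passing NUMA_NO_NODE, so node-agnostic entry points stop threading a constant through the fast path. The pattern in isolation, as a hedged sketch with hypothetical names:

	#include <linux/numa.h>
	#include <linux/slab.h>

	/* One worker takes the explicit node; the common-case wrapper is
	 * __always_inline so the NUMA_NO_NODE constant folds away. */
	static __always_inline void *my_alloc_node(struct kmem_cache *s,
						   gfp_t gfp, int node)
	{
		return kmem_cache_alloc_node(s, gfp, node);
	}

	static __always_inline void *my_alloc(struct kmem_cache *s, gfp_t gfp)
	{
		return my_alloc_node(s, gfp, NUMA_NO_NODE);
	}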

View file

@@ -105,24 +105,12 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);
 
-/**
- * __krealloc - like krealloc() but don't free @p.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * This function is like krealloc() except it never frees the originally
- * allocated buffer. Use this if you don't want to free the buffer immediately
- * like, for example, with RCU.
- */
-void *__krealloc(const void *p, size_t new_size, gfp_t flags)
+static __always_inline void *__do_krealloc(const void *p, size_t new_size,
+					   gfp_t flags)
 {
 	void *ret;
 	size_t ks = 0;
 
-	if (unlikely(!new_size))
-		return ZERO_SIZE_PTR;
-
 	if (p)
 		ks = ksize(p);
 
@@ -135,6 +123,25 @@ void *__krealloc(const void *p, size_t new_size, gfp_t flags)
 	return ret;
 }
 
+/**
+ * __krealloc - like krealloc() but don't free @p.
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * This function is like krealloc() except it never frees the originally
+ * allocated buffer. Use this if you don't want to free the buffer immediately
+ * like, for example, with RCU.
+ */
+void *__krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	if (unlikely(!new_size))
+		return ZERO_SIZE_PTR;
+
+	return __do_krealloc(p, new_size, flags);
+}
 EXPORT_SYMBOL(__krealloc);
 
 /**
@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return ZERO_SIZE_PTR;
 	}
 
-	ret = __krealloc(p, new_size, flags);
+	ret = __do_krealloc(p, new_size, flags);
 	if (ret && p != ret)
 		kfree(p);
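
Factoring the body into __do_krealloc() lets krealloc() bypass the exported __krealloc() and its repeated ZERO_SIZE_PTR check while keeping both entry points' semantics. From the caller's side nothing changes; a hedged usage sketch (grow_buf is hypothetical):

	#include <linux/slab.h>

	/* krealloc() frees the old buffer on success; on failure the
	 * original allocation is untouched, so *bufp stays valid. */
	static int grow_buf(char **bufp, size_t new_size, gfp_t gfp)
	{
		char *n = krealloc(*bufp, new_size, gfp);

		if (!n)
			return -ENOMEM;
		*bufp = n;
		return 0;
	}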