mm, slub: introduce kmem_cache_debug_flags()
There are a few places that call kmem_cache_debug(s) (which tests if any of the debug flags are enabled for a cache) immediately followed by a test for a specific flag. The compiler can probably eliminate the extra check, but we can make the code nicer by introducing kmem_cache_debug_flags(), which works like kmem_cache_debug() (including the static key check) but tests for specific flag(s). The next patches will add more users.

[vbabka@suse.cz: change return from int to bool, per Kees. Add VM_WARN_ON_ONCE() for invalid flags, per Roman]
Link: http://lkml.kernel.org/r/949b90ed-e0f0-07d7-4d21-e30ec0958a7c@suse.cz

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Roman Gushchin <guro@fb.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: Jann Horn <jannh@google.com>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Link: http://lkml.kernel.org/r/20200610163135.17364-8-vbabka@suse.cz
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
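At a call site, the conversion looks like this (it mirrors the fixup_red_left() hunk in the diff below, with SLAB_RED_ZONE as the specific flag):

	/* Before: generic debug check followed by a specific flag test. */
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	/* After: one helper that keeps the static key fast path and
	 * tests only the requested flag(s). */
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;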
Parent: ca0cab65ea
Commit: 59052e89fc
Changed: mm/slub.c (21 lines)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -122,18 +122,29 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 #endif
 
-static inline int kmem_cache_debug(struct kmem_cache *s)
+/*
+ * Returns true if any of the specified slub_debug flags is enabled for the
+ * cache. Use only for flags parsed by setup_slub_debug() as it also enables
+ * the static key.
+ */
+static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
+	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
 #ifdef CONFIG_SLUB_DEBUG
 	if (static_branch_unlikely(&slub_debug_enabled))
-		return s->flags & SLAB_DEBUG_FLAGS;
+		return s->flags & flags;
 #endif
-	return 0;
+	return false;
 }
 
+static inline bool kmem_cache_debug(struct kmem_cache *s)
+{
+	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
+}
+
 void *fixup_red_left(struct kmem_cache *s, void *p)
 {
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 		p += s->red_left_pad;
 
 	return p;
@@ -4060,7 +4071,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	offset = (ptr - page_address(page)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
-	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
 		if (offset < s->red_left_pad)
 			usercopy_abort("SLUB object in left red zone",
 				       s->name, to_user, offset, n);