#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>
#include <linux/bug.h>	/* BUILD_BUG_ON(), used by the bitfield macros below */

#ifdef CONFIG_KMEMCHECK
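/*
 * Runtime switch; per Documentation/kmemcheck.txt this is 0 (disabled),
 * 1 (enabled) or 2 (one-shot mode: kmemcheck disables itself again
 * after reporting the first use of uninitialized memory).
 */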
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);
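
/*
 * Sketch of the intended calling sequence. This is illustrative only;
 * the real call sites live in the slab and page allocators, and the
 * flow around the calls is assumed here, not defined by this header:
 *
 *	page = alloc_pages_node(node, flags, order);
 *	kmemcheck_alloc_shadow(page, order, flags, node);
 *	... carve objects out of the page ...
 *	kmemcheck_slab_alloc(s, gfpflags, object, size);
 *	... object in use; reads of uninitialized bytes warn ...
 *	kmemcheck_slab_free(s, object, size);
 *	...
 *	kmemcheck_free_shadow(page, order);
 */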

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
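
/*
 * The mark_*() helpers set the shadow state of a byte range; the _pages
 * variants do the same for whole pages. A minimal sketch of the object
 * lifecycle they model (the transitions are assumed, not mandated here):
 *
 *	kmemcheck_mark_uninitialized(addr, n);	reads of [addr, addr+n) warn
 *	...
 *	kmemcheck_mark_initialized(addr, n);	e.g. after memset(); reads OK
 *	...
 *	kmemcheck_mark_freed(addr, n);		use-after-free now detectable
 */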

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
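
/*
 * Presumably for memory scanners such as kmemleak: returns whether every
 * byte of the object at [addr, addr + size) is marked initialized, i.e.
 * whether its contents can be inspected without producing a warning.
 */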

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

#define kmemcheck_annotate_variable(var)			\
	do {							\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
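
/*
 * Example (the struct name is hypothetical): suppress false positives
 * for a variable that is initialized behind kmemcheck's back, e.g. by
 * hardware or DMA:
 *
 *	struct mydev_dma_desc desc;
 *	kmemcheck_annotate_variable(desc);
 */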

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)

#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */