mm: emit the "free" trace report before freeing memory in kmem_cache_free()
After the memory is freed, it can be immediately allocated by other CPUs,
before the "free" trace report has been emitted.  This causes inaccurate
traces.

For example, if the following sequence of events occurs:

    CPU 0                   CPU 1

  (1) alloc xxxxxx
  (2) free  xxxxxx
                         (3) alloc xxxxxx
                         (4) free  xxxxxx

Then they will be inaccurately reported via tracing, so that they appear to
have happened in this order:

    CPU 0                   CPU 1

  (1) alloc xxxxxx
                         (2) alloc xxxxxx
  (3) free  xxxxxx
                         (4) free  xxxxxx

This makes it look like CPU 1 somehow managed to allocate memory that CPU 0
still had allocated for itself.

In order to avoid this, emit the "free xxxxxx" tracing report just before
the actual call to free the memory, instead of just after it.

Link: https://lkml.kernel.org/r/374eb75d-7404-8721-4e1e-65b0e5b17279@huawei.com
Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
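The shape of the fix is the same in all three slab allocators touched below.
As a rough sketch (not the kernel code itself; the function names and the
do_the_free() helper are placeholders standing in for __cache_free(),
__kmem_cache_free() and slab_free()):

	/* Before: another CPU can reallocate objp and emit its own "alloc"
	 * event in the window between the actual free and the trace call. */
	void kmem_cache_free_before(struct kmem_cache *c, void *objp)
	{
		do_the_free(c, objp);				/* objp reusable from here on */
		trace_kmem_cache_free(_RET_IP_, objp, c->name);	/* may be logged too late */
	}

	/* After: the "free" event is recorded before the object can be handed
	 * out again, so the alloc/free events stay correctly ordered. */
	void kmem_cache_free_after(struct kmem_cache *c, void *objp)
	{
		trace_kmem_cache_free(_RET_IP_, objp, c->name);
		do_the_free(c, objp);
	}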
Parent: 85b6d24646
Commit: 9a543f007b
mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	if (!cachep)
 		return;
 
+	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
-
-	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
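Read as a whole, the mm/slab.c hunk leaves kmem_cache_free() looking roughly
like this after the patch (the lines above the hunk, i.e. the flags
declaration and the cache_from_obj() lookup, are reconstructed from the
surrounding code and may differ slightly in the exact tree).  Note that the
trace is now emitted before local_irq_save(), outside the interrupts-off
region, instead of after local_irq_restore():

	void kmem_cache_free(struct kmem_cache *cachep, void *objp)
	{
		unsigned long flags;

		cachep = cache_from_obj(cachep, objp);
		if (!cachep)
			return;

		trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
		local_irq_save(flags);
		debug_check_no_locks_freed(objp, cachep->object_size);
		if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, cachep->object_size);
		__cache_free(cachep, objp, _RET_IP_);
		local_irq_restore(flags);
	}
	EXPORT_SYMBOL(kmem_cache_free);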
mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
+	trace_kmem_cache_free(_RET_IP_, b, c->name);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
-
-	trace_kmem_cache_free(_RET_IP_, b, c->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
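Similarly, in mm/slob.c the trace call moves up to just after
kmemleak_free_recursive(), ahead of both the SLAB_TYPESAFE_BY_RCU
deferred-free path and the immediate __kmem_cache_free() path.  After the
patch the function reads roughly as follows (the lines elided between the
two sub-hunks are filled in from the surrounding slob.c code and may differ
slightly):

	void kmem_cache_free(struct kmem_cache *c, void *b)
	{
		kmemleak_free_recursive(b, c->flags);
		trace_kmem_cache_free(_RET_IP_, b, c->name);
		if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
			struct slob_rcu *slob_rcu;
			slob_rcu = b + (c->size - sizeof(struct slob_rcu));
			slob_rcu->size = c->size;
			call_rcu(&slob_rcu->head, kmem_rcu_free);
		} else {
			__kmem_cache_free(b, c->size);
		}
	}
	EXPORT_SYMBOL(kmem_cache_free);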
mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 