mm/mempool: add 'else' to split mutually exclusive case
Add else to split the mutually exclusive cases and avoid some unnecessary checks. It doesn't seem to change code generation (the compiler is smart), but I think it helps readability. [akpm@linux-foundation.org: fix comment location] Signed-off-by: Miaohe Lin <linmiaohe@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Link: https://lkml.kernel.org/r/20200924111641.28922-1-linmiaohe@huawei.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Родитель
f8fd52535c
Коммит
544941d788
18
mm/mempool.c
18
mm/mempool.c
|
@@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
|
||||||
static void check_element(mempool_t *pool, void *element)
|
static void check_element(mempool_t *pool, void *element)
|
||||||
{
|
{
|
||||||
/* Mempools backed by slab allocator */
|
/* Mempools backed by slab allocator */
|
||||||
if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
|
if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
|
||||||
__check_element(pool, element, ksize(element));
|
__check_element(pool, element, ksize(element));
|
||||||
|
} else if (pool->free == mempool_free_pages) {
|
||||||
/* Mempools backed by page allocator */
|
/* Mempools backed by page allocator */
|
||||||
if (pool->free == mempool_free_pages) {
|
|
||||||
int order = (int)(long)pool->pool_data;
|
int order = (int)(long)pool->pool_data;
|
||||||
void *addr = kmap_atomic((struct page *)element);
|
void *addr = kmap_atomic((struct page *)element);
|
||||||
|
|
||||||
|
@@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
|
||||||
static void poison_element(mempool_t *pool, void *element)
|
static void poison_element(mempool_t *pool, void *element)
|
||||||
{
|
{
|
||||||
/* Mempools backed by slab allocator */
|
/* Mempools backed by slab allocator */
|
||||||
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
|
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
|
||||||
__poison_element(element, ksize(element));
|
__poison_element(element, ksize(element));
|
||||||
|
} else if (pool->alloc == mempool_alloc_pages) {
|
||||||
/* Mempools backed by page allocator */
|
/* Mempools backed by page allocator */
|
||||||
if (pool->alloc == mempool_alloc_pages) {
|
|
||||||
int order = (int)(long)pool->pool_data;
|
int order = (int)(long)pool->pool_data;
|
||||||
void *addr = kmap_atomic((struct page *)element);
|
void *addr = kmap_atomic((struct page *)element);
|
||||||
|
|
||||||
|
@@ -107,7 +105,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
|
||||||
{
|
{
|
||||||
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
|
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
|
||||||
kasan_poison_kfree(element, _RET_IP_);
|
kasan_poison_kfree(element, _RET_IP_);
|
||||||
if (pool->alloc == mempool_alloc_pages)
|
else if (pool->alloc == mempool_alloc_pages)
|
||||||
kasan_free_pages(element, (unsigned long)pool->pool_data);
|
kasan_free_pages(element, (unsigned long)pool->pool_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -115,7 +113,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
|
||||||
{
|
{
|
||||||
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
|
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
|
||||||
kasan_unpoison_slab(element);
|
kasan_unpoison_slab(element);
|
||||||
if (pool->alloc == mempool_alloc_pages)
|
else if (pool->alloc == mempool_alloc_pages)
|
||||||
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
|
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче