mm: slab: remove ZONE_DMA_FLAG
Now that we have the IS_ENABLED() helper to check whether a Kconfig
option is enabled, ZONE_DMA_FLAG is no longer useful.  Moreover, the
use of ZONE_DMA_FLAG in slab is pointless according to Johannes
Weiner's comment [1]: the ORing of the passed-in flags with the
cache's gfp flags is already done in kmem_getpages().  Remove it.

[1] https://lkml.org/lkml/2014/9/25/553

Link: http://lkml.kernel.org/r/1462381297-11009-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: c7ce4f60ac
Commit: a3187e438b
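Background, not part of the change itself: IS_ENABLED() from
include/linux/kconfig.h expands to a compile-time 0 or 1 for any bool
or tristate option, which is what makes a mirror symbol like
ZONE_DMA_FLAG unnecessary.  A minimal sketch of the two patterns (the
commit goes further and drops the test entirely; see the mm/slab.c
hunk below):

        /* Old pattern: ZONE_DMA_FLAG is an int symbol that is always
         * defined (0 or 1), so plain C can test it directly. */
        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
                cachep->allocflags |= GFP_DMA;

        /* Equivalent modern pattern: test the bool option itself.
         * The compiler still discards the dead branch at build time. */
        if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & SLAB_CACHE_DMA))
                cachep->allocflags |= GFP_DMA;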
mm/Kconfig
@@ -268,11 +268,6 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
 config PHYS_ADDR_T_64BIT
         def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
 
-config ZONE_DMA_FLAG
-        int
-        default "0" if !ZONE_DMA
-        default "1"
-
 config BOUNCE
         bool "Enable bounce buffers"
         default y
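Why the int symbol existed in the first place (an illustrative sketch
of include/generated/autoconf.h, not part of this diff): Kconfig emits
a #define for a bool option only when it is enabled, so a plain
"if (CONFIG_ZONE_DMA)" would fail to compile with ZONE_DMA=n, whereas
an int symbol is always defined:

        /* With ZONE_DMA=n (illustrative): */
        #define CONFIG_ZONE_DMA_FLAG 0  /* int symbol: always defined */
        /* CONFIG_ZONE_DMA itself is simply absent, which is why
         * IS_ENABLED() needs its preprocessor trickery. */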
mm/slab.c | 23
@@ -2236,7 +2236,7 @@ done:
         cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
         cachep->flags = flags;
         cachep->allocflags = __GFP_COMP;
-        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
+        if (flags & SLAB_CACHE_DMA)
                 cachep->allocflags |= GFP_DMA;
         cachep->size = size;
         cachep->reciprocal_buffer_size = reciprocal_value(size);
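Dropping the CONFIG_ZONE_DMA_FLAG guard here is safe because GFP_DMA
is harmless on !ZONE_DMA kernels; the zone selection in
include/linux/gfp.h already falls back (paraphrased excerpt, not part
of this diff):

        /* include/linux/gfp.h, paraphrased: without a DMA zone,
         * GFP_DMA allocations are simply served from ZONE_NORMAL. */
        #ifdef CONFIG_ZONE_DMA
        #define OPT_ZONE_DMA ZONE_DMA
        #else
        #define OPT_ZONE_DMA ZONE_NORMAL
        #endif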
@@ -2664,16 +2664,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
         }
 }
 
-static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
-{
-        if (CONFIG_ZONE_DMA_FLAG) {
-                if (flags & GFP_DMA)
-                        BUG_ON(!(cachep->allocflags & GFP_DMA));
-                else
-                        BUG_ON(cachep->allocflags & GFP_DMA);
-        }
-}
-
 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
 {
         void *objp;
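The removed kmem_flagcheck() only asserted consistency between the
caller's gfp flags and the cache's.  As the changelog notes,
kmem_getpages() already ORs the cache's flags into every page
allocation, so a mismatch cannot mis-place an object.  A paraphrased
excerpt of that path (not part of this diff):

        static struct page *kmem_getpages(struct kmem_cache *cachep,
                                          gfp_t flags, int nodeid)
        {
                struct page *page;

                /* Merge per-cache flags, e.g. GFP_DMA for DMA caches,
                 * into the allocation before hitting the page allocator. */
                flags |= cachep->allocflags;
                page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
                ...
        }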
@@ -2752,14 +2742,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
         if (gfpflags_allow_blocking(local_flags))
                 local_irq_enable();
 
-        /*
-         * The test for missing atomic flag is performed here, rather than
-         * the more obvious place, simply to reduce the critical path length
-         * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
-         * will eventually be caught here (where it matters).
-         */
-        kmem_flagcheck(cachep, flags);
-
         /*
          * Get mem for the objs. Attempt to allocate a physical page from
          * 'nodeid'.
@@ -3145,9 +3127,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
                                                 gfp_t flags)
 {
         might_sleep_if(gfpflags_allow_blocking(flags));
-#if DEBUG
-        kmem_flagcheck(cachep, flags);
-#endif
 }
 
 #if DEBUG