Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as network buffers, or reclaimable, such as inode allocations. When something like updatedb runs, long-lived and unmovable kernel allocations tend to be spread throughout the address space, which increases fragmentation. This patch groups these allocations together as much as possible by adding a new migrate type. The MIGRATE_RECLAIMABLE type is for allocations that can be reclaimed on demand but not moved; i.e. they can be "migrated" by deleting them and re-reading the information from elsewhere.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: c361be55b3
Commit: e12ba74d8f
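The patch gives callers three ways to tag an allocation, all of which appear in the hunks below: pass set_migrateflags(gfp, __GFP_RECLAIMABLE) on an individual allocation, use the new GFP_TEMPORARY combination for short-lived page allocations, or create a slab cache with SLAB_TEMPORARY / SLAB_RECLAIM_ACCOUNT so the slab allocators add __GFP_RECLAIMABLE themselves. A minimal caller-side sketch using only identifiers introduced by this patch (the cache name, object size, and "some_cache" are illustrative, not from the patch):

        /* 1) Short-lived scratch page, grouped with other reclaimable pages */
        unsigned long buf = __get_free_page(GFP_TEMPORARY);

        /* 2) Mark a whole slab cache reclaimable; kmem_getpages()/allocate_slab()
         *    then OR __GFP_RECLAIMABLE into the backing page allocations */
        struct kmem_cache *tmp_cache =
                kmem_cache_create("tmp_objects", 192, 0, SLAB_TEMPORARY, NULL);

        /* 3) Tag a single allocation from an ordinary cache, as the buffer_head
         *    and radix-tree node hunks below do */
        void *obj = kmem_cache_alloc(some_cache,        /* illustrative cache */
                        set_migrateflags(GFP_KERNEL, __GFP_RECLAIMABLE));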
@@ -3169,7 +3169,8 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+                        set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
         if (ret) {
                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
                 get_cpu_var(bh_accounting).nr++;
@@ -903,7 +903,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
         struct dentry *dentry;
         char *dname;
 
         dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
         if (!dentry)
                 return NULL;
 
@@ -1710,7 +1710,7 @@ static int journal_init_journal_head_cache(void)
         journal_head_cache = kmem_cache_create("journal_head",
                                 sizeof(struct journal_head),
                                 0,              /* offset */
-                                0,              /* flags */
+                                SLAB_TEMPORARY, /* flags */
                                 NULL);          /* ctor */
         retval = 0;
         if (journal_head_cache == 0) {
@@ -2006,7 +2006,7 @@ static int __init journal_init_handle_cache(void)
         jbd_handle_cache = kmem_cache_create("journal_handle",
                                 sizeof(handle_t),
                                 0,              /* offset */
-                                0,              /* flags */
+                                SLAB_TEMPORARY, /* flags */
                                 NULL);          /* ctor */
         if (jbd_handle_cache == NULL) {
                 printk(KERN_EMERG "JBD: failed to create handle cache\n");
@@ -170,13 +170,15 @@ int __init journal_init_revoke_caches(void)
 {
         revoke_record_cache = kmem_cache_create("revoke_record",
                                            sizeof(struct jbd_revoke_record_s),
-                                           0, SLAB_HWCACHE_ALIGN, NULL);
+                                           0,
+                                           SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
+                                           NULL);
         if (revoke_record_cache == 0)
                 return -ENOMEM;
 
         revoke_table_cache = kmem_cache_create("revoke_table",
                                            sizeof(struct jbd_revoke_table_s),
-                                           0, 0, NULL);
+                                           0, SLAB_TEMPORARY, NULL);
         if (revoke_table_cache == 0) {
                 kmem_cache_destroy(revoke_record_cache);
                 revoke_record_cache = NULL;
@@ -492,7 +492,7 @@ static ssize_t proc_info_read(struct file * file, char __user * buf,
                 count = PROC_BLOCK_SIZE;
 
         length = -ENOMEM;
-        if (!(page = __get_free_page(GFP_KERNEL)))
+        if (!(page = __get_free_page(GFP_TEMPORARY)))
                 goto out;
 
         length = PROC_I(inode)->op.proc_read(task, (char*)page);
@@ -532,7 +532,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
                 goto out;
 
         ret = -ENOMEM;
-        page = (char *)__get_free_page(GFP_USER);
+        page = (char *)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
@@ -602,7 +602,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
                 goto out;
 
         copied = -ENOMEM;
-        page = (char *)__get_free_page(GFP_USER);
+        page = (char *)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
@@ -788,7 +788,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                 /* No partial writes. */
                 return -EINVAL;
         }
-        page = (char*)__get_free_page(GFP_USER);
+        page = (char*)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 return -ENOMEM;
         length = -EFAULT;
@@ -954,7 +954,8 @@ static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
                             char __user *buffer, int buflen)
 {
         struct inode * inode;
-        char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+        char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
+        char *path;
         int len;
 
         if (!tmp)
@@ -1726,7 +1727,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                 goto out;
 
         length = -ENOMEM;
-        page = (char*)__get_free_page(GFP_USER);
+        page = (char*)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
@@ -74,7 +74,7 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                 nbytes = MAX_NON_LFS - pos;
 
         dp = PDE(inode);
-        if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+        if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
                 return -ENOMEM;
 
         while ((nbytes > 0) && !eof) {
@@ -48,9 +48,10 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE  ((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE   ((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE   ((__force gfp_t)0x100000u) /* Page is movable */
 
-#define __GFP_BITS_SHIFT 20     /* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21     /* Room for 21 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -60,6 +61,8 @@ struct vm_area_struct;
 #define GFP_NOIO        (__GFP_WAIT)
 #define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY   (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+                         __GFP_RECLAIMABLE)
 #define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                          __GFP_HIGHMEM)
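GFP_TEMPORARY is GFP_KERNEL with the new reclaimable bit added; unlike GFP_USER it does not carry __GFP_HARDWALL, which is why the mem_read()/mem_write()/proc_*() conversions above that replace GFP_USER also drop the cpuset hardwall check. A compile-time illustration of the expansions, placed inside any function (assumes BUILD_BUG_ON from kernel.h; not part of the patch):

        /* Both assertions follow from the #defines in this header */
        BUILD_BUG_ON(GFP_TEMPORARY != (GFP_KERNEL | __GFP_RECLAIMABLE));
        BUILD_BUG_ON(GFP_USER      != (GFP_KERNEL | __GFP_HARDWALL));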
@@ -80,7 +83,7 @@ struct vm_area_struct;
 #endif
 
 /* This mask makes up all the page movable related flags */
-#define GFP_MOVABLE_MASK (__GFP_MOVABLE)
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
 
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -129,6 +132,12 @@ static inline enum zone_type gfp_zone(gfp_t flags)
                 return base + ZONE_NORMAL;
 }
 
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+        BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+        return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+}
+
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
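The helper added above replaces whatever mobility bits a caller already has with the requested ones, and traps callers that somehow carry both bits at once. A small worked example, following directly from the definitions in this header (illustration only, not part of the patch):

        /* GFP_KERNEL carries no mobility bits, so nothing is cleared and the
         * result is bit-for-bit identical to GFP_TEMPORARY defined above */
        gfp_t a = set_migrateflags(GFP_KERNEL, __GFP_RECLAIMABLE);

        /* A movable request can be re-tagged as reclaimable: __GFP_MOVABLE is
         * cleared via GFP_MOVABLE_MASK before __GFP_RECLAIMABLE is ORed in */
        gfp_t b = set_migrateflags(GFP_KERNEL | __GFP_MOVABLE, __GFP_RECLAIMABLE);

        /* The BUG_ON only fires when both __GFP_RECLAIMABLE and __GFP_MOVABLE
         * were already set, i.e. the caller passed a nonsensical combination */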
@@ -35,10 +35,12 @@
 
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
-#define MIGRATE_MOVABLE       1
-#define MIGRATE_TYPES         2
+#define MIGRATE_RECLAIMABLE   1
+#define MIGRATE_MOVABLE       2
+#define MIGRATE_TYPES         3
 #else
 #define MIGRATE_UNMOVABLE     0
+#define MIGRATE_UNRECLAIMABLE 0
 #define MIGRATE_MOVABLE       0
 #define MIGRATE_TYPES         1
 #endif
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-        PB_range(PB_migrate, 1), /* 1 bit required for migrate types */
+        PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
         NR_PAGEBLOCK_BITS
 };
 
@@ -24,12 +24,14 @@
 #define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
 #define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
 #define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* Objects are reclaimable */
 #define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
 #define SLAB_TRACE              0x00200000UL    /* Trace allocations and frees */
 
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* Objects are reclaimable */
+#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
@@ -1463,7 +1463,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
         ssize_t retval = 0;
         char *s;
 
-        if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+        if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
                 return -ENOMEM;
 
         s = page;
@@ -98,7 +98,8 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                 struct radix_tree_node *ret;
                 gfp_t gfp_mask = root_gfp_mask(root);
 
-                ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+                ret = kmem_cache_alloc(radix_tree_node_cachep,
+                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
                 if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
                         struct radix_tree_preload *rtp;
 
@@ -142,7 +143,8 @@ int radix_tree_preload(gfp_t gfp_mask)
         rtp = &__get_cpu_var(radix_tree_preloads);
         while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                 preempt_enable();
-                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+                node = kmem_cache_alloc(radix_tree_node_cachep,
+                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
                 if (node == NULL)
                         goto out;
                 preempt_disable();
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-        return ((gfp_flags & __GFP_MOVABLE) != 0);
+        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
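With two mobility bits the function now maps gfp flags to an index among the new migrate types, and warns if a caller sets both bits at once. The resulting mapping follows from the bit tests above and the MIGRATE_* values in the hunk adding MIGRATE_RECLAIMABLE (the snippet itself is illustrative, not in the patch):

        /* no mobility bit     -> 0 == MIGRATE_UNMOVABLE
         * __GFP_RECLAIMABLE   -> 1 == MIGRATE_RECLAIMABLE
         * __GFP_MOVABLE       -> 2 == MIGRATE_MOVABLE
         * both bits           -> 3, which is what the WARN_ON rejects */
        int type = gfpflags_to_migratetype(GFP_TEMPORARY); /* MIGRATE_RECLAIMABLE */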
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-        [MIGRATE_UNMOVABLE]   = { MIGRATE_MOVABLE   },
-        [MIGRATE_MOVABLE]     = { MIGRATE_UNMOVABLE },
+        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
+        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
+        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
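Each row lists, for one migrate type, the order in which the other types' free lists are used once its own lists are depleted. Reading the table (comment-level illustration, not part of the patch):

        /* fallbacks[MIGRATE_UNMOVABLE]   : try RECLAIMABLE, then MOVABLE
         * fallbacks[MIGRATE_RECLAIMABLE] : try UNMOVABLE,   then MOVABLE
         * fallbacks[MIGRATE_MOVABLE]     : try RECLAIMABLE, then UNMOVABLE */
        int first_choice = fallbacks[MIGRATE_RECLAIMABLE][0]; /* MIGRATE_UNMOVABLE */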
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
          * might be reconsidered if it ever diverges from PAGE_SIZE.
          *
-         * __GFP_MOVABLE is masked out as swap vectors cannot move
+         * Mobility flags are masked out as swap vectors cannot move
          */
-        return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+        return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
         flags |= cachep->gfpflags;
+        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+                flags |= __GFP_RECLAIMABLE;
 
         page = alloc_pages_node(nodeid, flags, cachep->gfporder);
         if (!page)
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (s->flags & SLAB_CACHE_DMA)
                 flags |= SLUB_DMA;
 
+        if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                flags |= __GFP_RECLAIMABLE;
+
         if (node == -1)
                 page = alloc_pages(flags, s->order);
         else