mm: use 'unsigned int' for page order
Let's try to be consistent about data type of page order.

[sfr@canb.auug.org.au: fix build (type of pageblock_order)]
[hughd@google.com: some configs end up with MAX_ORDER and pageblock_order having different types]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d00181b96e
parent 1d798ca3f1
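Why unsigned? A page order is a small non-negative shift count (0 through MAX_ORDER - 1), so unsigned int is the natural type; the churn below applies that consistently rather than changing behavior. Mixed signedness is not just cosmetic: when a signed order meets an unsigned value in a comparison, the usual arithmetic conversions promote the signed side, which can invert the intuitive result. A minimal user-space illustration of the trap (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            int order = -1;            /* bogus sentinel order */
            unsigned int limit = 11;   /* e.g. MAX_ORDER */

            /*
             * The usual arithmetic conversions promote 'order' to
             * unsigned, turning -1 into UINT_MAX, so the intuitive
             * reading of "-1 < 11" is wrong: the branch is NOT taken.
             */
            if (order < limit)
                    printf("in range\n");
            else
                    printf("out of range: order became %u\n",
                           (unsigned int)order);
            return 0;
    }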
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -550,7 +550,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 	return compound_page_dtors[page[1].compound_dtor];
 }
 
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
 {
 	if (!PageHead(page))
 		return 0;
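The compound_order() return-type change above is the most visible signature change in the patch: callers commonly feed the result straight into a shift to turn an order into a size. A hypothetical user-space sketch of that caller pattern (PAGE_SIZE and the stub are illustrative stand-ins, not the kernel definitions):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stand-in for compound_order(): the real order lives in page[1]. */
    static unsigned int compound_order_stub(void)
    {
            return 9;               /* e.g. a 2MB huge page on x86 */
    }

    int main(void)
    {
            unsigned int order = compound_order_stub();

            /*
             * Typical caller pattern: order -> byte size. The 1UL base
             * of PAGE_SIZE keeps the shift in unsigned long, so it
             * cannot overflow int for large orders.
             */
            unsigned long bytes = PAGE_SIZE << order;
            printf("order %u -> %lu bytes\n", order, bytes);
            return 0;
    }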
@@ -1810,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
 extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+		const char *fmt, ...);
 
 extern void setup_per_cpu_pageset(void);
 
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -44,7 +44,7 @@ enum pageblock_bits {
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
 /* Huge page sizes are variable */
-extern int pageblock_order;
+extern unsigned int pageblock_order;
 
 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -994,7 +994,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 
 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
 static void destroy_compound_gigantic_page(struct page *page,
-					unsigned long order)
+					unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -1009,7 +1009,7 @@ static void destroy_compound_gigantic_page(struct page *page,
 	__ClearPageHead(page);
 }
 
-static void free_gigantic_page(struct page *page, unsigned order)
+static void free_gigantic_page(struct page *page, unsigned int order)
 {
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
@@ -1053,7 +1053,7 @@ static bool zone_spans_last_pfn(const struct zone *zone,
 	return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, unsigned order)
+static struct page *alloc_gigantic_page(int nid, unsigned int order)
 {
 	unsigned long nr_pages = 1 << order;
 	unsigned long ret, pfn, flags;
@@ -1089,7 +1089,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned order)
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
-static void prep_compound_gigantic_page(struct page *page, unsigned long order);
+static void prep_compound_gigantic_page(struct page *page, unsigned int order);
 
 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 {
@@ -1122,9 +1122,9 @@ static int alloc_fresh_gigantic_page(struct hstate *h,
 static inline bool gigantic_page_supported(void) { return true; }
 #else
 static inline bool gigantic_page_supported(void) { return false; }
-static inline void free_gigantic_page(struct page *page, unsigned order) { }
+static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
-						unsigned long order) { }
+						unsigned int order) { }
 static inline int alloc_fresh_gigantic_page(struct hstate *h,
 					nodemask_t *nodes_allowed) { return 0; }
 #endif
@@ -1250,7 +1250,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	put_page(page); /* free it into the hugepage allocator */
 }
 
-static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+static void prep_compound_gigantic_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -1968,7 +1968,8 @@ found:
 	return 1;
 }
 
-static void __init prep_compound_huge_page(struct page *page, int order)
+static void __init prep_compound_huge_page(struct page *page,
+		unsigned int order)
 {
 	if (unlikely(order > (MAX_ORDER - 1)))
 		prep_compound_gigantic_page(page, order);
@@ -2679,7 +2680,7 @@ static int __init hugetlb_init(void)
 module_init(hugetlb_init);
 
 /* Should be called on processing a hugepagesz=... option */
-void __init hugetlb_add_hstate(unsigned order)
+void __init hugetlb_add_hstate(unsigned int order)
 {
 	struct hstate *h;
 	unsigned long i;
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -177,7 +177,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
 extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
 					unsigned int order);
-extern void prep_compound_page(struct page *page, unsigned long order);
+extern void prep_compound_page(struct page *page, unsigned int order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
@@ -235,7 +235,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
  * page cannot be allocated or merged in parallel. Alternatively, it must
  * handle invalid values gracefully, and use page_order_unsafe() below.
  */
-static inline unsigned long page_order(struct page *page)
+static inline unsigned int page_order(struct page *page)
 {
 	/* PageBuddy() must be checked by the caller */
 	return page_private(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -181,7 +181,7 @@ bool pm_suspended_storage(void)
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
-int pageblock_order __read_mostly;
+unsigned int pageblock_order __read_mostly;
 #endif
 
 static void __free_pages_ok(struct page *page, unsigned int order);
@@ -462,7 +462,7 @@ static void free_compound_page(struct page *page)
 	__free_pages_ok(page, compound_order(page));
 }
 
-void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -662,7 +662,7 @@ static inline void __free_one_page(struct page *page,
 	unsigned long combined_idx;
 	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
-	int max_order = MAX_ORDER;
+	unsigned int max_order = MAX_ORDER;
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -675,7 +675,7 @@ static inline void __free_one_page(struct page *page,
 		 * pageblock. Without this, pageblock isolation
 		 * could cause incorrect freepage accounting.
 		 */
-		max_order = min(MAX_ORDER, pageblock_order + 1);
+		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 	} else {
 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 	}
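The min() -> min_t() switch just above is the build fix credited to sfr and hughd in the changelog: the kernel's min() macro compares pointers to both operands' types so that mixing types draws a compiler diagnostic, and once pageblock_order is unsigned int while MAX_ORDER expands to a plain int constant, a bare min() no longer builds cleanly. min_t() sidesteps the check by casting both sides to an explicitly named type. A stripped-down imitation of the two macros (the real ones live in include/linux/kernel.h; min_naive is an illustrative name):

    #include <stdio.h>

    /* Simplified kernel-style min(): the (void)(&_x == &_y) comparison
     * of distinct pointer types triggers a warning, which the kernel's
     * warnings-as-errors build turns into a hard failure. */
    #define min_naive(x, y) ({                      \
            typeof(x) _x = (x);                     \
            typeof(y) _y = (y);                     \
            (void)(&_x == &_y);                     \
            _x < _y ? _x : _y; })

    /* min_t() avoids the check by casting both sides to 'type'. */
    #define min_t(type, x, y) ({                    \
            type _x = (type)(x);                    \
            type _y = (type)(y);                    \
            _x < _y ? _x : _y; })

    #define MAX_ORDER 11                    /* plain int constant */
    static unsigned int pageblock_order = 9;

    int main(void)
    {
            /*
             * min_naive(MAX_ORDER, pageblock_order + 1) would mix int
             * with unsigned int and draw "comparison of distinct
             * pointer types lacks a cast".
             */
            unsigned int max_order = min_t(unsigned int, MAX_ORDER,
                                           pageblock_order + 1);
            printf("max_order = %u\n", max_order);  /* prints 10 */
            return 0;
    }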
@@ -1471,7 +1471,7 @@ int move_freepages(struct zone *zone,
 			  int migratetype)
 {
 	struct page *page;
-	unsigned long order;
+	unsigned int order;
 	int pages_moved = 0;
 
 #ifndef CONFIG_HOLES_IN_ZONE
@@ -1584,7 +1584,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 							  int start_type)
 {
-	int current_order = page_order(page);
+	unsigned int current_order = page_order(page);
 	int pages;
 
 	/* Take ownership for orders >= pageblock_order */
@@ -2637,7 +2637,7 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
 		DEFAULT_RATELIMIT_INTERVAL,
 		DEFAULT_RATELIMIT_BURST);
 
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
@@ -2671,7 +2671,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 		va_end(args);
 	}
 
-	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
 		current->comm, order, gfp_mask);
 
 	dump_stack();
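The pr_warn() tweak just above is the companion to the prototype change: with order now unsigned int, %u keeps the format specifier and the argument type in agreement. For the small values an order can actually take, %d would print the same digits, so this is about consistency rather than a visible bug; it is the class of mismatch gcc reports under -Wformat-signedness. A trivial illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int order = 3;

            /* "%d" here would still compile, but -Wformat-signedness
             * flags it; "%u" matches the unsigned type. */
            printf("page allocation failure: order:%u\n", order);
            return 0;
    }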
@@ -3449,7 +3449,8 @@ void free_kmem_pages(unsigned long addr, unsigned int order)
 	}
 }
 
-static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+static void *make_alloc_exact(unsigned long addr, unsigned int order,
+		size_t size)
 {
 	if (addr) {
 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
@@ -3499,7 +3500,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
-	unsigned order = get_order(size);
+	unsigned int order = get_order(size);
 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
 	if (!p)
 		return NULL;
@@ -3800,7 +3801,8 @@ void show_free_areas(unsigned int filter)
 	}
 
 	for_each_populated_zone(zone) {
-		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned int order;
+		unsigned long nr[MAX_ORDER], flags, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -4149,7 +4151,7 @@ static void build_zonelists(pg_data_t *pgdat)
 	nodemask_t used_mask;
 	int local_node, prev_node;
 	struct zonelist *zonelist;
-	int order = current_zonelist_order;
+	unsigned int order = current_zonelist_order;
 
 	/* initialize zonelists */
 	for (i = 0; i < MAX_ZONELISTS; i++) {
@@ -6678,7 +6680,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
 	unsigned long outer_start, outer_end;
-	int ret = 0, order;
+	unsigned int order;
+	int ret = 0;
 
 	struct compact_control cc = {
 		.nr_migratepages = 0,