2010-07-12 08:36:09 +04:00
|
|
|
#ifndef _LINUX_MEMBLOCK_H
|
|
|
|
#define _LINUX_MEMBLOCK_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
|
2010-07-28 09:28:21 +04:00
|
|
|
#ifdef CONFIG_HAVE_MEMBLOCK
|
2010-07-12 08:36:09 +04:00
|
|
|
/*
|
|
|
|
* Logical memory blocks.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2001 Peter Bergner, IBM Corp.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
|
2010-07-28 09:20:58 +04:00
|
|
|
/* initial number of entries in the static memblock region arrays */
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
|
2010-07-12 08:36:09 +04:00
|
|
|
|
2018-06-30 17:55:04 +03:00
|
|
|
/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 *
 * The flags are bit values so a region may carry several attributes.
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* no special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
|
2014-01-22 03:49:23 +04:00
|
|
|
|
2018-06-30 17:55:04 +03:00
|
|
|
/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes (see enum memblock_flags)
 * @nid: NUMA node id (only present with CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};
|
|
|
|
|
2018-06-30 17:55:04 +03:00
|
|
|
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;	/* regions currently in use, cnt <= max */
	unsigned long max;	/* capacity of the @regions array */
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};
|
|
|
|
|
2018-06-30 17:55:04 +03:00
|
|
|
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory (only with CONFIG_HAVE_MEMBLOCK_PHYS_MAP)
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};
|
|
|
|
|
|
|
|
/* the single global memblock instance, defined in mm/memblock.c */
extern struct memblock memblock;
/* non-zero enables memblock_dbg() output */
extern int memblock_debug;
|
|
|
|
|
2016-01-16 03:57:11 +03:00
|
|
|
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/*
 * On architectures that discard memblock data after boot, place memblock
 * code and data into the __meminit/__meminitdata sections.
 */
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif
|
|
|
|
|
2010-07-28 09:07:21 +04:00
|
|
|
/*
 * Emit a debug message when memblock debugging is enabled.
 * Wrapped in do { } while (0) so the macro behaves as a single statement:
 * the bare "if (memblock_debug) printk(...)" form would capture a
 * following "else" (dangling-else hazard) when used unbraced inside an
 * if/else.
 */
#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__);	\
	} while (0)
|
2010-07-12 08:36:09 +04:00
|
|
|
|
2014-01-22 03:50:14 +04:00
|
|
|
/* search for a free range inside [start, end), optionally on node @nid */
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
/* allow the region arrays to be resized (after slab is available) */
void memblock_allow_resize(void);
/* add/remove ranges to/from the memory and reserved region lists */
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
|
mem-hotplug: handle node hole when initializing numa_meminfo.
When parsing SRAT, all memory ranges are added into numa_meminfo. In
numa_init(), before entering numa_cleanup_meminfo(), all possible memory
ranges are in numa_meminfo. And numa_cleanup_meminfo() removes all
ranges over max_pfn or empty.
But, this only works if the nodes are continuous. Let's have a look at
the following example:
We have an SRAT like this:
SRAT: Node 0 PXM 0 [mem 0x00000000-0x5fffffff]
SRAT: Node 0 PXM 0 [mem 0x100000000-0x1ffffffffff]
SRAT: Node 1 PXM 1 [mem 0x20000000000-0x3ffffffffff]
SRAT: Node 4 PXM 2 [mem 0x40000000000-0x5ffffffffff] hotplug
SRAT: Node 5 PXM 3 [mem 0x60000000000-0x7ffffffffff] hotplug
SRAT: Node 2 PXM 4 [mem 0x80000000000-0x9ffffffffff] hotplug
SRAT: Node 3 PXM 5 [mem 0xa0000000000-0xbffffffffff] hotplug
SRAT: Node 6 PXM 6 [mem 0xc0000000000-0xdffffffffff] hotplug
SRAT: Node 7 PXM 7 [mem 0xe0000000000-0xfffffffffff] hotplug
On boot, only node 0,1,2,3 exist.
And the numa_meminfo will look like this:
numa_meminfo.nr_blks = 9
1. on node 0: [0, 60000000]
2. on node 0: [100000000, 20000000000]
3. on node 1: [20000000000, 40000000000]
4. on node 4: [40000000000, 60000000000]
5. on node 5: [60000000000, 80000000000]
6. on node 2: [80000000000, a0000000000]
7. on node 3: [a0000000000, a0800000000]
8. on node 6: [c0000000000, a0800000000]
9. on node 7: [e0000000000, a0800000000]
And numa_cleanup_meminfo() will merge 1 and 2, and remove 8,9 because the
end address is over max_pfn, which is a0800000000. But 4 and 5 are not
removed because their end addresses are less then max_pfn. But in fact,
node 4 and 5 don't exist.
In a word, numa_cleanup_meminfo() is not able to handle holes between nodes.
Since memory ranges in node 4 and 5 are in numa_meminfo, in
numa_register_memblks(), node 4 and 5 will be mistakenly set to online.
If you run lscpu, it will show:
NUMA node0 CPU(s): 0-14,128-142
NUMA node1 CPU(s): 15-29,143-157
NUMA node2 CPU(s):
NUMA node3 CPU(s):
NUMA node4 CPU(s): 62-76,190-204
NUMA node5 CPU(s): 78-92,206-220
In this patch, we use memblock_overlaps_region() to check if ranges in
numa_meminfo overlap with ranges in memory_block. Since memory_block
contains all available memory at boot time, if they overlap, it means the
ranges exist. If not, then remove them from numa_meminfo.
After this patch, lscpu will show:
NUMA node0 CPU(s): 0-14,128-142
NUMA node1 CPU(s): 15-29,143-157
NUMA node4 CPU(s): 62-76,190-204
NUMA node5 CPU(s): 78-92,206-220
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Vladimir Murzin <vladimir.murzin@arm.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-09-09 01:02:03 +03:00
|
|
|
/* test whether [base, base + size) intersects any region of @type */
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
/* set or clear per-region attribute flags (see enum memblock_flags) */
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
|
2014-01-29 21:16:01 +04:00
|
|
|
|
|
|
|
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

/* iterator backend for for_each_mem_range() */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

/* iterator backend for for_each_mem_range_rev() */
void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);
|
|
|
|
|
memblock: introduce a for_each_reserved_mem_region iterator
Struct page initialisation had been identified as one of the reasons why
large machines take a long time to boot. Patches were posted a long time ago
to defer initialisation until they were first used. This was rejected on
the grounds it should not be necessary to hurt the fast paths. This series
reuses much of the work from that time but defers the initialisation of
memory to kswapd so that one thread per node initialises memory local to
that node.
After applying the series and setting the appropriate Kconfig variable I
see this in the boot log on a 64G machine
[ 7.383764] kswapd 0 initialised deferred memory in 188ms
[ 7.404253] kswapd 1 initialised deferred memory in 208ms
[ 7.411044] kswapd 3 initialised deferred memory in 216ms
[ 7.411551] kswapd 2 initialised deferred memory in 216ms
On a 1TB machine, I see
[ 8.406511] kswapd 3 initialised deferred memory in 1116ms
[ 8.428518] kswapd 1 initialised deferred memory in 1140ms
[ 8.435977] kswapd 0 initialised deferred memory in 1148ms
[ 8.437416] kswapd 2 initialised deferred memory in 1148ms
Once booted the machine appears to work as normal. Boot times were measured
from the time shutdown was called until ssh was available again. In the
64G case, the boot time savings are negligible. On the 1TB machine, the
savings were 16 seconds.
Nate Zimmer said:
: On an older 8 TB box with lots and lots of cpus the boot time, as
: measure from grub to login prompt, the boot time improved from 1484
: seconds to exactly 1000 seconds.
Waiman Long said:
: I ran a bootup timing test on a 12-TB 16-socket IvyBridge-EX system. From
: grub menu to ssh login, the bootup time was 453s before the patch and 265s
: after the patch - a saving of 188s (42%).
Daniel Blueman said:
: On a 7TB, 1728-core NumaConnect system with 108 NUMA nodes, we're seeing
: stock 4.0 boot in 7136s. This drops to 2159s, or a 70% reduction with
: this patchset. Non-temporal PMD init (https://lkml.org/lkml/2015/4/23/350)
: drops this to 1045s.
This patch (of 13):
As part of initializing struct page's in 2MiB chunks, we noticed that at
the end of free_all_bootmem(), there was nothing which had forced the
reserved/allocated 4KiB pages to be initialized.
This helper function will be used for that expansion.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nate Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-07-01 00:56:41 +03:00
|
|
|
/* iterator backend for for_each_reserved_mem_region() */
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);
|
memblock: introduce a for_each_reserved_mem_region iterator
Struct page initialisation had been identified as one of the reasons why
large machines take a long time to boot. Patches were posted a long time ago
to defer initialisation until they were first used. This was rejected on
the grounds it should not be necessary to hurt the fast paths. This series
reuses much of the work from that time but defers the initialisation of
memory to kswapd so that one thread per node initialises memory local to
that node.
After applying the series and setting the appropriate Kconfig variable I
see this in the boot log on a 64G machine
[ 7.383764] kswapd 0 initialised deferred memory in 188ms
[ 7.404253] kswapd 1 initialised deferred memory in 208ms
[ 7.411044] kswapd 3 initialised deferred memory in 216ms
[ 7.411551] kswapd 2 initialised deferred memory in 216ms
On a 1TB machine, I see
[ 8.406511] kswapd 3 initialised deferred memory in 1116ms
[ 8.428518] kswapd 1 initialised deferred memory in 1140ms
[ 8.435977] kswapd 0 initialised deferred memory in 1148ms
[ 8.437416] kswapd 2 initialised deferred memory in 1148ms
Once booted the machine appears to work as normal. Boot times were measured
from the time shutdown was called until ssh was available again. In the
64G case, the boot time savings are negligible. On the 1TB machine, the
savings were 16 seconds.
Nate Zimmer said:
: On an older 8 TB box with lots and lots of cpus the boot time, as
: measure from grub to login prompt, the boot time improved from 1484
: seconds to exactly 1000 seconds.
Waiman Long said:
: I ran a bootup timing test on a 12-TB 16-socket IvyBridge-EX system. From
: grub menu to ssh login, the bootup time was 453s before the patch and 265s
: after the patch - a saving of 188s (42%).
Daniel Blueman said:
: On a 7TB, 1728-core NumaConnect system with 108 NUMA nodes, we're seeing
: stock 4.0 boot in 7136s. This drops to 2159s, or a 70% reduction with
: this patchset. Non-temporal PMD init (https://lkml.org/lkml/2015/4/23/350)
: drops this to 1045s.
This patch (of 13):
As part of initializing struct page's in 2MiB chunks, we noticed that at
the end of free_all_bootmem(), there was nothing which had forced the
reserved/allocated 4KiB pages to be initialized.
This helper function will be used for that expansion.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nate Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-07-01 00:56:41 +03:00
|
|
|
|
2017-08-19 01:16:05 +03:00
|
|
|
/* internal helpers for freeing memblock-reserved memory (mm/memblock.c) */
void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
|
|
|
|
|
2014-01-29 21:16:01 +04:00
|
|
|
/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * __next_mem_range() sets @i to ULLONG_MAX when the iteration is done.
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
|
|
|
|
|
|
|
|
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Starting @i at ULLONG_MAX tells __next_mem_range_rev() to begin from
 * the last range; it sets @i back to ULLONG_MAX when done.
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))
|
|
|
|
|
memblock: introduce a for_each_reserved_mem_region iterator
Struct page initialisation had been identified as one of the reasons why
large machines take a long time to boot. Patches were posted a long time ago
to defer initialisation until they were first used. This was rejected on
the grounds it should not be necessary to hurt the fast paths. This series
reuses much of the work from that time but defers the initialisation of
memory to kswapd so that one thread per node initialises memory local to
that node.
After applying the series and setting the appropriate Kconfig variable I
see this in the boot log on a 64G machine
[ 7.383764] kswapd 0 initialised deferred memory in 188ms
[ 7.404253] kswapd 1 initialised deferred memory in 208ms
[ 7.411044] kswapd 3 initialised deferred memory in 216ms
[ 7.411551] kswapd 2 initialised deferred memory in 216ms
On a 1TB machine, I see
[ 8.406511] kswapd 3 initialised deferred memory in 1116ms
[ 8.428518] kswapd 1 initialised deferred memory in 1140ms
[ 8.435977] kswapd 0 initialised deferred memory in 1148ms
[ 8.437416] kswapd 2 initialised deferred memory in 1148ms
Once booted the machine appears to work as normal. Boot times were measured
from the time shutdown was called until ssh was available again. In the
64G case, the boot time savings are negligible. On the 1TB machine, the
savings were 16 seconds.
Nate Zimmer said:
: On an older 8 TB box with lots and lots of cpus the boot time, as
: measure from grub to login prompt, the boot time improved from 1484
: seconds to exactly 1000 seconds.
Waiman Long said:
: I ran a bootup timing test on a 12-TB 16-socket IvyBridge-EX system. From
: grub menu to ssh login, the bootup time was 453s before the patch and 265s
: after the patch - a saving of 188s (42%).
Daniel Blueman said:
: On a 7TB, 1728-core NumaConnect system with 108 NUMA nodes, we're seeing
: stock 4.0 boot in 7136s. This drops to 2159s, or a 70% reduction with
: this patchset. Non-temporal PMD init (https://lkml.org/lkml/2015/4/23/350)
: drops this to 1045s.
This patch (of 13):
As part of initializing struct page's in 2MiB chunks, we noticed that at
the end of free_all_bootmem(), there was nothing which had forced the
reserved/allocated 4KiB pages to be initialized.
This helper function will be used for that expansion.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nate Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-07-01 00:56:41 +03:00
|
|
|
/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
|
|
|
|
|
2014-01-22 03:49:35 +04:00
|
|
|
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
|
|
|
|
{
|
|
|
|
return m->flags & MEMBLOCK_HOTPLUG;
|
|
|
|
}
|
|
|
|
|
2015-06-25 02:58:12 +03:00
|
|
|
static inline bool memblock_is_mirror(struct memblock_region *m)
|
|
|
|
{
|
|
|
|
return m->flags & MEMBLOCK_MIRROR;
|
|
|
|
}
|
|
|
|
|
2015-11-30 15:28:15 +03:00
|
|
|
static inline bool memblock_is_nomap(struct memblock_region *m)
|
|
|
|
{
|
|
|
|
return m->flags & MEMBLOCK_NOMAP;
|
|
|
|
}
|
|
|
|
|
2011-12-08 22:22:09 +04:00
|
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
2013-09-12 01:22:17 +04:00
|
|
|
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
|
|
|
|
unsigned long *end_pfn);
|
2011-12-08 22:22:09 +04:00
|
|
|
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
|
|
|
|
unsigned long *out_end_pfn, int *out_nid);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* for_each_mem_pfn_range - early memory pfn range iterator
|
|
|
|
* @i: an integer used as loop variable
|
|
|
|
* @nid: node selector, %MAX_NUMNODES for all nodes
|
|
|
|
* @p_start: ptr to ulong for start pfn of the range, can be %NULL
|
|
|
|
* @p_end: ptr to ulong for end pfn of the range, can be %NULL
|
|
|
|
* @p_nid: ptr to int for nid of the range, can be %NULL
|
|
|
|
*
|
2012-10-09 03:32:24 +04:00
|
|
|
* Walks over configured memory ranges.
|
2011-12-08 22:22:09 +04:00
|
|
|
*/
|
|
|
|
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
|
|
|
|
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
|
|
|
|
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
|
|
|
|
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
|
|
|
|
2011-07-12 13:15:59 +04:00
|
|
|
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
|
2011-12-08 22:22:09 +04:00
|
|
|
|
|
|
|
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)
|
2011-12-08 22:22:09 +04:00
|
|
|
|
mm: zero reserved and unavailable struct pages
Some memory is reserved but unavailable: not present in memblock.memory
(because not backed by physical pages), but present in memblock.reserved.
Such memory has backing struct pages, but they are not initialized by
going through __init_single_page().
In some cases these struct pages are accessed even if they do not
contain any data. One example is page_to_pfn() might access page->flags
if this is where section information is stored (CONFIG_SPARSEMEM,
SECTION_IN_PAGE_FLAGS).
One example of such memory: trim_low_memory_range() unconditionally
reserves from pfn 0, but e820__memblock_setup() might provide the
exiting memory from pfn 1 (i.e. KVM).
Since struct pages are zeroed in __init_single_page(), and not during
allocation time, we must zero such struct pages explicitly.
The patch involves adding a new memblock iterator:
for_each_resv_unavail_range(i, p_start, p_end)
Which iterates through reserved && !memory lists, and we zero struct pages
explicitly by calling mm_zero_struct_page().
===
Here is more detailed example of problem that this patch is addressing:
Run tested on qemu with the following arguments:
-enable-kvm -cpu kvm64 -m 512 -smp 2
This patch reports that there are 98 unavailable pages.
They are: pfn 0 and pfns in range [159, 255].
Note, trim_low_memory_range() reserves only pfns in range [0, 15], it does
not reserve [159, 255] ones.
e820__memblock_setup() reports linux that the following physical ranges are
available:
[1 , 158]
[256, 130783]
Notice, that exactly unavailable pfns are missing!
Now, lets check what we have in zone 0: [1, 131039]
pfn 0, is not part of the zone, but pfns [1, 158], are.
However, the bigger problem we have if we do not initialize these struct
pages is with memory hotplug. Because, that path operates at 2M
boundaries (section_nr). And checks if 2M range of pages is hot
removable. It starts with first pfn from zone, rounds it down to 2M
boundary (sturct pages are allocated at 2M boundaries when vmemmap is
created), and checks if that section is hot removable. In this case
start with pfn 1 and convert it down to pfn 0. Later pfn is converted
to struct page, and some fields are checked. Now, if we do not zero
struct pages, we get unpredictable results.
In fact when CONFIG_VM_DEBUG is enabled, and we explicitly set all
vmemmap memory to ones, the following panic is observed with kernel test
without this patch applied:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: is_pageblock_removable_nolock+0x35/0x90
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT
...
task: ffff88001f4e2900 task.stack: ffffc90000314000
RIP: 0010:is_pageblock_removable_nolock+0x35/0x90
Call Trace:
? is_mem_section_removable+0x5a/0xd0
show_mem_removable+0x6b/0xa0
dev_attr_show+0x1b/0x50
sysfs_kf_seq_show+0xa1/0x100
kernfs_seq_show+0x22/0x30
seq_read+0x1ac/0x3a0
kernfs_fop_read+0x36/0x190
? security_file_permission+0x90/0xb0
__vfs_read+0x16/0x30
vfs_read+0x81/0x130
SyS_read+0x44/0xa0
entry_SYSCALL_64_fastpath+0x1f/0xbd
Link: http://lkml.kernel.org/r/20171013173214.27300-7-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2017-11-16 04:36:31 +03:00
|
|
|
/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
 * Available as soon as memblock is initialized.
 * Note: because this memory does not belong to any physical node, the flags
 * and nid arguments would not make sense and thus are not exposed here.
 */
#define for_each_resv_unavail_range(i, p_start, p_end)			\
	for_each_mem_range(i, &memblock.reserved, &memblock.memory,	\
			   NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
|
|
|
|
|
2014-01-22 03:49:23 +04:00
|
|
|
/* Set the attribute bits in @flags on memblock region @r. */
static inline void memblock_set_region_flags(struct memblock_region *r,
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

/* Clear the attribute bits in @flags on memblock region @r. */
static inline void memblock_clear_region_flags(struct memblock_region *r,
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}
|
|
|
|
|
2011-07-14 13:43:42 +04:00
|
|
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/* Associate [base, base + size) of @type with NUMA node @nid. */
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

/* Record the NUMA node id owning memblock region @r. */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

/* Return the NUMA node id recorded for memblock region @r. */
static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
/* No node map: regions carry no nid, so setting it is a no-op. */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

/* No node map: every region is reported as belonging to node 0. */
static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
|
|
|
|
2011-12-08 22:22:06 +04:00
|
|
|
/* NUMA-aware physical allocators (semantics defined in mm/memblock.c). */
phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

/* Allocate @size bytes of physical memory with the given @align. */
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
|
2010-07-07 02:39:01 +04:00
|
|
|
|
2013-11-13 03:07:59 +04:00
|
|
|
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
|
|
|
|
|
2010-07-07 02:39:01 +04:00
|
|
|
/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
|
|
|
|
|
2014-06-05 03:06:53 +04:00
|
|
|
/* Range- and node-constrained physical allocators. */
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
				    phys_addr_t align, phys_addr_t max_addr,
				    int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);

/* Accounting and range queries over the memory/reserved lists. */
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);

/* Trimming of the memory list at boot time. */
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);

/* Membership predicates for single addresses and [base, base + size) ranges. */
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
|
2011-12-08 22:22:06 +04:00
|
|
|
|
2011-12-08 22:22:06 +04:00
|
|
|
extern void __memblock_dump_all(void);

/* Dump all memblock regions, but only when memblock debugging is enabled. */
static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
|
2010-07-12 08:36:09 +04:00
|
|
|
|
2010-07-07 02:39:01 +04:00
|
|
|
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

/* Return the current boot-time allocation limit. */
phys_addr_t memblock_get_current_limit(void);
|
|
|
|
|
2010-08-04 07:40:38 +04:00
|
|
|
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}
|
|
|
|
|
|
|
|
/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}
|
|
|
|
|
|
|
|
/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}
|
|
|
|
|
|
|
|
/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
|
|
|
|
|
|
|
|
/* Iterate @region over every region of memblock.@memblock_type. */
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

/* Iterate @rgn over the regions of @memblock_type, keeping index @i. */
#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
|
2010-08-04 07:40:38 +04:00
|
|
|
|
2015-04-15 01:48:27 +03:00
|
|
|
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
/* CONFIG_MEMTEST disabled: the early memory test is a no-op. */
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
|
2011-05-25 04:13:19 +04:00
|
|
|
#else
/* !CONFIG_HAVE_MEMBLOCK stub: allocation always fails (returns 0). */
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */
|
|
|
|
|
2010-07-12 08:36:09 +04:00
|
|
|
#endif /* __KERNEL__ */
|
|
|
|
|
|
|
|
#endif /* _LINUX_MEMBLOCK_H */
|