2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* Copyright (C) 1998-2003 Hewlett-Packard Co
|
|
|
|
* David Mosberger-Tang <davidm@hpl.hp.com>
|
|
|
|
* Stephane Eranian <eranian@hpl.hp.com>
|
|
|
|
* Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
|
|
|
|
* Copyright (C) 1999 VA Linux Systems
|
|
|
|
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
|
|
|
|
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Routines used by ia64 machines with contiguous (or virtually contiguous)
|
|
|
|
* memory.
|
|
|
|
*/
|
|
|
|
#include <linux/efi.h>
|
2011-12-08 22:22:08 +04:00
|
|
|
#include <linux/memblock.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/mm.h>
|
2007-08-22 22:34:38 +04:00
|
|
|
#include <linux/nmi.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/swap.h>
|
2020-12-15 06:09:47 +03:00
|
|
|
#include <linux/sizes.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#include <asm/meminit.h>
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/mca.h>
|
|
|
|
|
|
|
|
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
|
|
|
|
|
2007-11-07 02:14:45 +03:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static void *cpu_data;
|
|
|
|
/**
|
|
|
|
* per_cpu_init - setup per-cpu variables
|
|
|
|
*
|
|
|
|
* Allocate and setup per-cpu data areas.
|
|
|
|
*/
|
2013-06-17 23:51:20 +04:00
|
|
|
void *per_cpu_init(void)
|
2007-11-07 02:14:45 +03:00
|
|
|
{
|
2009-10-02 08:28:56 +04:00
|
|
|
static bool first_time = true;
|
|
|
|
void *cpu0_data = __cpu0_per_cpu;
|
|
|
|
unsigned int cpu;
|
|
|
|
|
|
|
|
if (!first_time)
|
|
|
|
goto skip;
|
|
|
|
first_time = false;
|
2007-11-07 02:14:45 +03:00
|
|
|
|
|
|
|
/*
|
2009-10-02 08:28:56 +04:00
|
|
|
* get_free_pages() cannot be used before cpu_init() done.
|
|
|
|
* BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
|
|
|
|
* to avoid that AP calls get_zeroed_page().
|
2007-11-07 02:14:45 +03:00
|
|
|
*/
|
2009-10-02 08:28:56 +04:00
|
|
|
for_each_possible_cpu(cpu) {
|
2009-10-02 08:28:56 +04:00
|
|
|
void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
|
|
|
|
|
|
|
|
memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
|
|
|
|
__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
|
|
|
|
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* percpu area for cpu0 is moved from the __init area
|
|
|
|
* which is setup by head.S and used till this point.
|
|
|
|
* Update ar.k3. This move is ensures that percpu
|
|
|
|
* area for cpu0 is on the correct node and its
|
|
|
|
* virtual address isn't insanely far from other
|
|
|
|
* percpu areas which is important for congruent
|
|
|
|
* percpu allocator.
|
|
|
|
*/
|
|
|
|
if (cpu == 0)
|
|
|
|
ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
|
|
|
|
(unsigned long)__per_cpu_start);
|
|
|
|
|
|
|
|
cpu_data += PERCPU_PAGE_SIZE;
|
2007-11-07 02:14:45 +03:00
|
|
|
}
|
2009-10-02 08:28:56 +04:00
|
|
|
skip:
|
2007-11-07 02:14:45 +03:00
|
|
|
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
alloc_per_cpu_data(void)
|
|
|
|
{
|
2019-03-12 09:30:00 +03:00
|
|
|
size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
|
|
|
|
|
|
|
|
cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
|
2018-10-31 01:09:03 +03:00
|
|
|
__pa(MAX_DMA_ADDRESS));
|
2019-03-12 09:30:00 +03:00
|
|
|
if (!cpu_data)
|
|
|
|
panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
|
|
|
|
__func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
|
2007-11-07 02:14:45 +03:00
|
|
|
}
|
2009-10-02 08:28:56 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* setup_per_cpu_areas - setup percpu areas
|
|
|
|
*
|
|
|
|
* Arch code has already allocated and initialized percpu areas. All
|
|
|
|
* this function has to do is to teach the determined layout to the
|
|
|
|
* dynamic percpu allocator, which happens to be more complex than
|
|
|
|
* creating whole new ones using helpers.
|
|
|
|
*/
|
|
|
|
void __init
|
|
|
|
setup_per_cpu_areas(void)
|
|
|
|
{
|
|
|
|
struct pcpu_alloc_info *ai;
|
|
|
|
struct pcpu_group_info *gi;
|
|
|
|
unsigned int cpu;
|
|
|
|
ssize_t static_size, reserved_size, dyn_size;
|
|
|
|
|
|
|
|
ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
|
|
|
|
if (!ai)
|
|
|
|
panic("failed to allocate pcpu_alloc_info");
|
|
|
|
gi = &ai->groups[0];
|
|
|
|
|
|
|
|
/* units are assigned consecutively to possible cpus */
|
|
|
|
for_each_possible_cpu(cpu)
|
|
|
|
gi->cpu_map[gi->nr_units++] = cpu;
|
|
|
|
|
|
|
|
/* set parameters */
|
|
|
|
static_size = __per_cpu_end - __per_cpu_start;
|
|
|
|
reserved_size = PERCPU_MODULE_RESERVE;
|
|
|
|
dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
|
|
|
|
if (dyn_size < 0)
|
|
|
|
panic("percpu area overflow static=%zd reserved=%zd\n",
|
|
|
|
static_size, reserved_size);
|
|
|
|
|
|
|
|
ai->static_size = static_size;
|
|
|
|
ai->reserved_size = reserved_size;
|
|
|
|
ai->dyn_size = dyn_size;
|
|
|
|
ai->unit_size = PERCPU_PAGE_SIZE;
|
|
|
|
ai->atom_size = PAGE_SIZE;
|
|
|
|
ai->alloc_size = PERCPU_PAGE_SIZE;
|
|
|
|
|
2019-07-03 11:25:52 +03:00
|
|
|
pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
|
2009-10-02 08:28:56 +04:00
|
|
|
pcpu_free_alloc_info(ai);
|
|
|
|
}
|
2007-11-07 02:14:45 +03:00
|
|
|
#else
|
|
|
|
#define alloc_per_cpu_data() do { } while (0)
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/**
|
|
|
|
* find_memory - setup memory map
|
|
|
|
*
|
|
|
|
* Walk the EFI memory map and find usable memory for the system, taking
|
|
|
|
* into account reserved areas.
|
|
|
|
*/
|
2006-03-23 03:54:15 +03:00
|
|
|
void __init
|
2005-04-17 02:20:36 +04:00
|
|
|
find_memory (void)
|
|
|
|
{
|
|
|
|
reserve_memory();
|
|
|
|
|
|
|
|
/* first find highest page frame number */
|
[IA64] min_low_pfn and max_low_pfn calculation fix
We have seen bad_pte_print when testing crashdump on an SN machine in
recent 2.6.20 kernel. There are tons of bad pte print (pfn < max_low_pfn)
reports when the crash kernel boots up, all those reported bad pages
are inside initmem range; That is because if the crash kernel code and
data happens to be at the beginning of the 1st node. build_node_maps in
discontig.c will bypass reserved regions with filter_rsvd_memory. Since
min_low_pfn is calculated in build_node_map, so in this case, min_low_pfn
will be greater than kernel code and data.
Because pages inside initmem are freed and reused later, we saw
pfn_valid check fail on those pages.
I think this theoretically happen on a normal kernel. When I check
min_low_pfn and max_low_pfn calculation in contig.c and discontig.c.
I found more issues than this.
1. min_low_pfn and max_low_pfn calculation is inconsistent between
contig.c and discontig.c,
min_low_pfn is calculated as the first page number of boot memmap in
contig.c (Why? Though this may work at the most of the time, I don't
think it is the right logic). It is calculated as the lowest physical
memory page number bypass reserved regions in discontig.c.
max_low_pfn is calculated include reserved regions in contig.c. It is
calculated exclude reserved regions in discontig.c.
2. If kernel code and data region is happen to be at the begin or the
end of physical memory, when min_low_pfn and max_low_pfn calculation is
bypassed kernel code and data, pages in initmem will report bad.
3. initrd is also in reserved regions, if it is at the begin or at the
end of physical memory, kernel will refuse to reuse the memory. Because
the virt_addr_valid check in free_initrd_mem.
So it is better to fix and clean up those issues.
Calculate min_low_pfn and max_low_pfn in a consistent way.
Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Acked-by: Jay Lan <jlan@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
2007-03-20 23:41:57 +03:00
|
|
|
min_low_pfn = ~0UL;
|
|
|
|
max_low_pfn = 0;
|
|
|
|
efi_memmap_walk(find_max_min_low_pfn, NULL);
|
|
|
|
max_pfn = max_low_pfn;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2018-07-23 08:56:58 +03:00
|
|
|
#ifdef CONFIG_VIRTUAL_MEM_MAP
|
|
|
|
efi_memmap_walk(filter_memory, register_active_ranges);
|
|
|
|
#else
|
|
|
|
memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
|
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
find_initrd();
|
2006-12-12 11:49:03 +03:00
|
|
|
|
2007-11-07 02:14:45 +03:00
|
|
|
alloc_per_cpu_data();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2020-12-15 06:09:47 +03:00
|
|
|
/*
 * efi_memmap_walk() callback: track the largest gap between consecutive
 * memory descriptors in *arg.  Keeps the end of the previous descriptor
 * in a static local, so a walk must start from a fresh call sequence.
 */
static int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	static u64 last_end = PAGE_OFFSET;
	u64 *gap = arg;
	u64 hole = start - last_end;

	/* NOTE: this algorithm assumes efi memmap table is ordered */
	if (hole > *gap)
		*gap = hole;

	last_end = end;
	return 0;
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2020-12-15 06:09:47 +03:00
|
|
|
/*
 * Panic if the EFI memory map contains a hole large enough to make
 * FLATMEM's linear mem_map prohibitively wasteful.
 */
static void __init verify_gap_absence(void)
{
	/*
	 * Must start at 0: find_largest_hole() only ever raises the
	 * value, so an uninitialized stack value would be read and
	 * could mask (or fake) a large hole.
	 */
	unsigned long max_gap = 0;

	/* Forbid FLATMEM if hole is > than 1G */
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap >= SZ_1G)
		panic("Cannot use FLATMEM with %ldMB hole\n"
		      "Please switch over to SPARSEMEM\n",
		      (max_gap >> 20));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up the page tables.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void __init
|
|
|
|
paging_init (void)
|
|
|
|
{
|
|
|
|
unsigned long max_dma;
|
|
|
|
unsigned long max_zone_pfns[MAX_NR_ZONES];
|
|
|
|
|
|
|
|
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
|
|
|
|
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
|
|
|
|
max_zone_pfns[ZONE_DMA32] = max_dma;
|
|
|
|
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
|
|
|
|
|
2020-12-15 06:09:47 +03:00
|
|
|
verify_gap_absence();
|
2020-12-15 06:09:43 +03:00
|
|
|
|
2020-06-04 01:57:10 +03:00
|
|
|
free_area_init(max_zone_pfns);
|
2005-04-17 02:20:36 +04:00
|
|
|
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
|
|
|
|
}
|