2019-05-27 09:55:01 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-09-26 10:04:21 +04:00
|
|
|
/*
|
|
|
|
* PowerPC version
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
|
|
*
|
|
|
|
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
|
|
|
|
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
|
|
|
|
* Copyright (C) 1996 Paul Mackerras
|
|
|
|
* PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
|
|
|
|
*
|
|
|
|
* Derived from "arch/i386/mm/init.c"
|
|
|
|
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/stddef.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/initrd.h>
|
|
|
|
#include <linux/pagemap.h>
|
2010-07-12 08:36:09 +04:00
|
|
|
#include <linux/memblock.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/gfp.h>
|
2011-06-28 13:54:48 +04:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/hugetlb.h>
|
2005-09-26 10:04:21 +04:00
|
|
|
|
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
#include <asm/prom.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/smp.h>
|
|
|
|
#include <asm/machdep.h>
|
|
|
|
#include <asm/btext.h>
|
|
|
|
#include <asm/tlb.h>
|
2005-10-06 06:23:33 +04:00
|
|
|
#include <asm/sections.h>
|
2011-06-28 13:54:48 +04:00
|
|
|
#include <asm/hugetlb.h>
|
2019-04-18 09:51:18 +03:00
|
|
|
#include <asm/kup.h>
|
2019-04-26 19:23:34 +03:00
|
|
|
#include <asm/kasan.h>
|
2005-09-26 10:04:21 +04:00
|
|
|
|
2019-03-29 12:59:59 +03:00
|
|
|
#include <mm/mmu_decl.h>
|
2005-09-26 10:04:21 +04:00
|
|
|
|
|
|
|
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
|
2010-01-20 19:02:24 +03:00
|
|
|
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
|
2008-12-17 13:09:13 +03:00
|
|
|
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
|
2013-05-29 11:55:47 +04:00
|
|
|
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
|
2005-09-26 10:04:21 +04:00
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
|
|
|
|
|
2008-07-09 19:09:23 +04:00
|
|
|
phys_addr_t total_memory;
|
|
|
|
phys_addr_t total_lowmem;
|
2005-09-26 10:04:21 +04:00
|
|
|
|
2016-07-13 04:14:40 +03:00
|
|
|
#ifdef CONFIG_RELOCATABLE
|
powerpc: Define virtual-physical translations for RELOCATABLE
We find the runtime address of _stext and relocate ourselves based
on the following calculation.
virtual_base = ALIGN(KERNELBASE,KERNEL_TLB_PIN_SIZE) +
MODULO(_stext.run,KERNEL_TLB_PIN_SIZE)
relocate() is called with the Effective Virtual Base Address (as
shown below)
| Phys. Addr| Virt. Addr |
Page |------------------------|
Boundary | | |
| | |
| | |
Kernel Load |___________|_ __ _ _ _ _|<- Effective
Addr(_stext)| | ^ |Virt. Base Addr
| | | |
| | | |
| |reloc_offset|
| | | |
| | | |
| |______v_____|<-(KERNELBASE)%TLB_SIZE
| | |
| | |
| | |
Page |-----------|------------|
Boundary | | |
On BookE, we need __va() & __pa() early in the boot process to access
the device tree.
Currently this has been defined as :
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) -
PHYSICAL_START + KERNELBASE)
where:
PHYSICAL_START is kernstart_addr - a variable updated at runtime.
KERNELBASE is the compile time Virtual base address of kernel.
This won't work for us, as kernstart_addr is dynamic and will yield different
results for __va()/__pa() for same mapping.
e.g.,
Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
PAGE_OFFSET).
In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
= 0xbc100000 , which is wrong.
it should be : 0xc0000000 + 0x100000 = 0xc0100000
On platforms which support AMP, like PPC_47x (based on 44x), the kernel
could be loaded at highmem. Hence we cannot always depend on the compile
time constants for mapping.
Here are the possible solutions:
1) Update kernstart_addr(PHSYICAL_START) to match the Physical address of
compile time KERNELBASE value, instead of the actual Physical_Address(_stext).
The disadvantage is that we may break other users of PHYSICAL_START. They
could be replaced with __pa(_stext).
2) Redefine __va() & __pa() with relocation offset
#ifdef CONFIG_RELOCATABLE_PPC32
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + (KERNELBASE + RELOC_OFFSET)))
#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - (KERNELBASE + RELOC_OFFSET))
#endif
where, RELOC_OFFSET could be
a) A variable, say relocation_offset (like kernstart_addr), updated
at boot time. This impacts performance, as we have to load an additional
variable from memory.
OR
b) #define RELOC_OFFSET ((PHYSICAL_START & PPC_PIN_SIZE_OFFSET_MASK) - \
(KERNELBASE & PPC_PIN_SIZE_OFFSET_MASK))
This introduces more calculations for doing the translation.
3) Redefine __va() & __pa() with a new variable
i.e,
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
where VIRT_PHYS_OFFSET :
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif /* CONFIG_RELOCATABLE_PPC32 */
where virt_phy_offset is updated at runtime to :
Effective KERNELBASE - kernstart_addr.
Taking our example, above:
virt_phys_offset = effective_kernelstart_vaddr - kernstart_addr
= 0xc0400000 - 0x400000
= 0xc0000000
and
__va(0x100000) = 0xc0000000 + 0x100000 = 0xc0100000
which is what we want.
I have implemented (3) in the following patch which has same cost of
operation as the existing one.
I have tested the patches on 440x platforms only. However this should
work fine for PPC_47x also, as we only depend on the runtime address
and the current TLB XLAT entry for the startup code, which is available
in r25. I don't have access to a 47x board yet. So, it would be great if
somebody could test this on 47x.
Signed-off-by: Suzuki K. Poulose <suzuki@in.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Signed-off-by: Josh Boyer <jwboyer@gmail.com>
2011-12-15 02:58:37 +04:00
|
|
|
/* Used in __va()/__pa() */
|
|
|
|
long long virt_phys_offset;
|
|
|
|
EXPORT_SYMBOL(virt_phys_offset);
|
|
|
|
#endif
|
|
|
|
|
2008-04-15 23:52:21 +04:00
|
|
|
phys_addr_t lowmem_end_addr;
|
2005-09-26 10:04:21 +04:00
|
|
|
|
|
|
|
int boot_mapsize;
|
|
|
|
#ifdef CONFIG_PPC_PMAC
|
|
|
|
unsigned long agp_special_page;
|
2005-10-22 08:42:51 +04:00
|
|
|
EXPORT_SYMBOL(agp_special_page);
|
2005-09-26 10:04:21 +04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
void MMU_init(void);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* this tells the system to map all of ram with the segregs
|
|
|
|
* (i.e. page tables) instead of the bats.
|
|
|
|
* -- Cort
|
|
|
|
*/
|
|
|
|
int __map_without_bats;
|
|
|
|
int __map_without_ltlbs;
|
|
|
|
|
|
|
|
/* max amount of low RAM to map in */
|
|
|
|
unsigned long __max_low_memory = MAX_LOW_MEM;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for command-line options that affect what MMU_init will do.
|
|
|
|
*/
|
2018-03-07 23:32:55 +03:00
|
|
|
static void __init MMU_setup(void)
|
2005-09-26 10:04:21 +04:00
|
|
|
{
|
|
|
|
/* Check for nobats option (used in mapin_ram). */
|
2014-09-17 08:39:36 +04:00
|
|
|
if (strstr(boot_command_line, "nobats")) {
|
2005-09-26 10:04:21 +04:00
|
|
|
__map_without_bats = 1;
|
|
|
|
}
|
|
|
|
|
2014-09-17 08:39:36 +04:00
|
|
|
if (strstr(boot_command_line, "noltlbs")) {
|
2005-09-26 10:04:21 +04:00
|
|
|
__map_without_ltlbs = 1;
|
|
|
|
}
|
2016-03-18 00:17:59 +03:00
|
|
|
if (debug_pagealloc_enabled()) {
|
|
|
|
__map_without_bats = 1;
|
|
|
|
__map_without_ltlbs = 1;
|
|
|
|
}
|
2019-02-21 22:08:51 +03:00
|
|
|
if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
|
2017-08-02 16:51:05 +03:00
|
|
|
__map_without_ltlbs = 1;
|
2005-09-26 10:04:21 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 *
 * Called once during early boot, before the linear mapping is usable.
 * Progress codes are reported through ppc_md.progress when available.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* parse args from command line */
	MMU_setup();

	/*
	 * Reserve gigantic pages for hugetlb. This MUST occur before
	 * lowmem_end_addr is initialized below.
	 */
	if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
		/*
		 * 32-bit boot only maps the first contiguous region;
		 * clamp memblock to it and warn about discarded memory.
		 */
		memblock_enforce_memory_limit(memblock.memory.regions[0].size);
		pr_warn("Only using first contiguous memory region\n");
#else
		/* The Wii has holes in RAM that need special handling. */
		wii_memory_fixups();
#endif
	}

	/* Initially assume all of RAM is lowmem; trimmed below if needed. */
	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
	lowmem_end_addr = memstart_addr + total_lowmem;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	/* Cap lowmem at __max_low_memory (MAX_LOW_MEM by default). */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
		lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
		/* Without highmem, memory above lowmem is unusable: drop it. */
		total_memory = total_lowmem;
		memblock_enforce_memory_limit(total_lowmem);
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* Initialize early top-down ioremap allocator */
	ioremap_bot = IOREMAP_TOP;

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif

	/* Enable kernel userspace access/execution prevention if supported. */
	setup_kup();

	/* Shortly after that, the entire linear mapping will be available */
	memblock_set_current_limit(lowmem_end_addr);
}
|