arm64 fixes for -rc4
- Fix two boot issues caused by the recent head.S rework when !KASLR
- Fix calculation of crashkernel memory reservation
- Fix bogus error check in PMU IRQ probing code
-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmMRwE8QHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNP6lB/4m0NNn4PQJ5FmTBuNurwB+CvZyC/pl/exL
reXG0hwI+ApX1xj6aD7tZ3tB+mi6EG0+3PBLuJ4KuSNecwwT/au5ZxAipsc+IRs0
EI4loeItzTaevaVTyydsTMgXYAJlCHlMDxEI0QIW3/datH10bZ1GzhEDL/Ebflfu
Mo4g0/+eysGmSHtj7kL5IU4bE+CCXnXycphgCLAn0qcdxuF/iZphu/GgB4Az6gSS
9ks3HJkO+X1UVb8qK0prVDsalX2z5tTWbxQ7XACJ4KEVEiSqh7ei3B283cF+Zeip
3PSg6gkPC5YZmLjv431B/pAycsx/x8KeXY0yDAadTRmOQYROMm0j
=a7xW
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "It's a lot smaller than last week, with the star of the show being a
  couple of fixes to head.S addressing a boot regression introduced by
  the recent overhaul of that code in non-default configurations
  (i.e. KASLR disabled). The first of those two resolves the issue
  reported (and bisected) by Mikulas in the wait_on_bit() thread.

  Summary:

   - Fix two boot issues caused by the recent head.S rework when !KASLR

   - Fix calculation of crashkernel memory reservation

   - Fix bogus error check in PMU IRQ probing code"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: Reserve enough pages for the initial ID map
  perf/arm_pmu_platform: fix tests for platform_get_irq() failure
  arm64: head: Ignore bogus KASLR displacement on non-relocatable kernels
  arm64/kexec: Fix missing extra range for crashkres_low.
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -64,28 +64,28 @@
 #define EARLY_KASLR	(0)
 #endif
 
-#define EARLY_ENTRIES(vstart, vend, shift) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift, add) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
 
-#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
 
 #if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
 #else
-#define EARLY_PUDS(vstart, vend) (0)
+#define EARLY_PUDS(vstart, vend, add) (0)
 #endif
 
 #if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
 #else
-#define EARLY_PMDS(vstart, vend) (0)
+#define EARLY_PMDS(vstart, vend, add) (0)
 #endif
 
-#define EARLY_PAGES(vstart, vend) ( 1			/* PGDIR page */				\
-			+ EARLY_PGDS((vstart), (vend))		/* each PGDIR needs a next level page table */	\
-			+ EARLY_PUDS((vstart), (vend))		/* each PUD needs a next level page table */	\
-			+ EARLY_PMDS((vstart), (vend)))		/* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
+#define EARLY_PAGES(vstart, vend, add) ( 1		/* PGDIR page */				\
+			+ EARLY_PGDS((vstart), (vend), add)	/* each PGDIR needs a next level page table */	\
+			+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
+			+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
 
 /* the initial ID map may need two extra pages if it needs to be extended */
 #if VA_BITS < 48
@@ -93,7 +93,7 @@
 #else
 #define INIT_IDMAP_DIR_SIZE	(INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
 #endif
-#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
+#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 
 /* Initial memory map size */
 #if ARM64_KERNEL_USES_PMD_MAPS
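The effect of the new `add` argument is easiest to see numerically. Below is a small user-space sketch of the same arithmetic, not kernel code: the shift values assume 4K pages, a 48-bit VA and four levels (PGDIR_SHIFT 39, PUD_SHIFT 30, PMD_SHIFT 21, i.e. SWAPPER_TABLE_SHIFT == PMD_SHIFT), and the link address and image size are made up for illustration. Passing add = EARLY_KASLR reproduces the old behaviour for the kernel mapping, while the initial ID map can now request one extra table per level (add = 1) to cover the FDT and the possible extension page.

/* Illustrative sketch only -- shift values assume 4K pages, 48-bit VA, 4 levels. */
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT 39
#define PUD_SHIFT   30
#define PMD_SHIFT   21

/* Mirror of EARLY_ENTRIES() after the change: per-level slack is an argument. */
static uint64_t early_entries(uint64_t vstart, uint64_t vend, int shift, int add)
{
	return (((vend - 1) >> shift) - (vstart >> shift)) + 1 + add;
}

/* Mirror of EARLY_PAGES(): one PGDIR page plus one table per entry at each level. */
static uint64_t early_pages(uint64_t vstart, uint64_t vend, int add)
{
	return 1
	     + early_entries(vstart, vend, PGDIR_SHIFT, add)
	     + early_entries(vstart, vend, PUD_SHIFT, add)
	     + early_entries(vstart, vend, PMD_SHIFT, add);
}

int main(void)
{
	uint64_t kimage_vaddr = 0xffff800008000000ULL;	/* hypothetical link address */
	uint64_t image_size   = 48ULL << 20;		/* hypothetical 48 MiB image */

	/* add = 0: no slack; add = 1: room for one extra table per level,
	 * as the initial ID map now requests. */
	printf("pages (add=0): %llu\n",
	       (unsigned long long)early_pages(kimage_vaddr, kimage_vaddr + image_size, 0));
	printf("pages (add=1): %llu\n",
	       (unsigned long long)early_pages(kimage_vaddr, kimage_vaddr + image_size, 1));
	return 0;
}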
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -371,7 +371,9 @@ SYM_FUNC_END(create_idmap)
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
 	adrp	x0, init_pg_dir
 	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
+#ifdef CONFIG_RELOCATABLE
 	add	x5, x5, x23			// add KASLR displacement
+#endif
 	adrp	x6, _end			// runtime __pa(_end)
 	adrp	x3, _text			// runtime __pa(_text)
 	sub	x6, x6, x3			// _end - _text
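In C terms the guarded instruction amounts to the following rough analogue (illustrative only; the real code is the assembly above and the names here are made up): the compile-time virtual base of the kernel only gets the runtime KASLR displacement added when the kernel was actually built relocatable, so a stale value in the displacement register cannot skew the mapping on !CONFIG_RELOCATABLE builds.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative only: mirrors the #ifdef CONFIG_RELOCATABLE guard above. */
static uint64_t kernel_va_base(uint64_t kimage_vaddr, uint64_t kaslr_offset,
			       bool built_relocatable)
{
	uint64_t va = kimage_vaddr;	/* compile-time __va(_text), mov_q x5 */

	if (built_relocatable)		/* CONFIG_RELOCATABLE=y */
		va += kaslr_offset;	/* add x5, x5, x23 */

	return va;			/* otherwise any bogus x23 value is ignored */
}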
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
 	u64 i;
 	phys_addr_t start, end;
 
-	nr_ranges = 1; /* for exclusion of crashkernel region */
+	nr_ranges = 2; /* for exclusion of crashkernel region */
 	for_each_mem_range(i, &start, &end)
 		nr_ranges++;
 
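Why two slots rather than one: each crashkernel reservation carved out of the middle of a System RAM range splits that range in two, so the ELF core header's range array needs one spare entry per excluded region, i.e. the main crashk_res reservation and, when present, the low crashk_low_res one. The kernel's own exclusion happens later in prepare_elf_headers(); the self-contained sketch below (the helper and addresses are illustrative, not the kernel's exclusion code) only shows why the array needs that headroom.

/* Sketch: carving a hole out of the middle of one range leaves two behind. */
#include <stdio.h>

struct range { unsigned long long start, end; };

/* Exclude [xs, xe] from r[]; an interior hole turns one range into two. */
static int exclude_range(struct range *r, int *n, int cap,
			 unsigned long long xs, unsigned long long xe)
{
	for (int i = 0; i < *n; i++) {
		if (xs <= r[i].start || xe >= r[i].end)
			continue;		/* only the interior-hole case is sketched */
		if (*n == cap)
			return -1;		/* array sized too small: the bug being fixed */
		r[*n] = (struct range){ xe + 1, r[i].end };
		r[i].end = xs - 1;
		(*n)++;
		return 0;
	}
	return 0;
}

int main(void)
{
	/* One RAM range, with capacity for the splits of two exclusions (nr_ranges = 2). */
	struct range mem[1 + 2] = { { 0x80000000ULL, 0xffffffffULL } };
	int n = 1;

	exclude_range(mem, &n, 3, 0xb0000000ULL, 0xb7ffffffULL);	/* "crashkernel" high */
	exclude_range(mem, &n, 3, 0x88000000ULL, 0x8fffffffULL);	/* "crashkernel" low  */

	for (int i = 0; i < n; i++)
		printf("range %d: %#llx-%#llx\n", i, mem[i].start, mem[i].end);
	return 0;
}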
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 
 	if (num_irqs == 1) {
 		int irq = platform_get_irq(pdev, 0);
-		if (irq && irq_is_percpu_devid(irq))
+		if ((irq > 0) && irq_is_percpu_devid(irq))
 			return pmu_parse_percpu_irq(pmu, irq);
 	}
 
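The underlying convention: platform_get_irq() returns a positive IRQ number on success and a negative errno on failure, and never returns 0 for a valid IRQ. A plain "if (irq)" therefore lets an error code such as -ENXIO or -EPROBE_DEFER through as if it were an IRQ, which is what the "(irq > 0)" test fixes. A minimal illustration of the idiom in a hypothetical driver probe (the demo_* names are made up, not part of this patch):

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate the errno, never treat it as an IRQ number */

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				"demo", pdev);
}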