arm64: kexec: skip relocation code for inplace kexec

In the case of kdump, or when segments are already in place, relocation
is not needed, so the setup of the relocation function and the call to
it can be skipped.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Suggested-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210930143113.1502553-6-pasha.tatashin@soleen.com
Signed-off-by: Will Deacon <will@kernel.org>
Author:    Pasha Tatashin, 2021-09-30 14:31:03 +00:00
Committer: Will Deacon
Parent:    0d8732e461
Commit:    5bb6834fc2
2 changed files: 21 additions, 16 deletions
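
Background: the "in place" check in the first hunk below keys off the kimage
head entry. When every segment has already been loaded at its final physical
address (kdump loads straight into the reserved crashkernel region), the
indirection list carries no relocation entries and the head is simply
IND_DONE. A minimal sketch of that test, using the flag encoding from
include/linux/kexec.h; the helper name is hypothetical, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Flag encoding as in include/linux/kexec.h. */
    #define IND_DONE_BIT    2
    #define IND_DONE        (1UL << IND_DONE_BIT)

    /*
     * Hypothetical helper (not in the kernel): a head entry that is already
     * IND_DONE means the relocation list is empty, i.e. every kexec segment
     * sits at its final physical address and no copying is required.
     */
    static bool kimage_is_in_place(uint64_t head)
    {
            return head & IND_DONE;
    }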

arch/arm64/kernel/machine_kexec.c

@@ -144,16 +144,16 @@ int machine_kexec_post_load(struct kimage *kimage)
 {
 	void *reloc_code = page_to_virt(kimage->control_code_page);
 
-	/* If in place flush new kernel image, else flush lists and buffers */
-	if (kimage->head & IND_DONE)
+	/* If in place, relocation is not used, only flush next kernel */
+	if (kimage->head & IND_DONE) {
 		kexec_segment_flush(kimage);
-	else
-		kexec_list_flush(kimage);
+		kexec_image_info(kimage);
+		return 0;
+	}
 
 	memcpy(reloc_code, arm64_relocate_new_kernel,
 	       arm64_relocate_new_kernel_size);
 	kimage->arch.kern_reloc = __pa(reloc_code);
-	kexec_image_info(kimage);
 
 	/* Flush the reloc_code in preparation for its execution. */
 	dcache_clean_inval_poc((unsigned long)reloc_code,
@@ -162,6 +162,8 @@ int machine_kexec_post_load(struct kimage *kimage)
 	icache_inval_pou((uintptr_t)reloc_code,
 			 (uintptr_t)reloc_code +
 			 arm64_relocate_new_kernel_size);
+	kexec_list_flush(kimage);
+	kexec_image_info(kimage);
 
 	return 0;
 }
@@ -188,19 +190,25 @@ void machine_kexec(struct kimage *kimage)
 	local_daif_mask();
 
 	/*
-	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
-	 * transfer control to the kern_reloc which contains a copy of
-	 * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
-	 * uses physical addressing to relocate the new image to its final
-	 * position and transfers control to the image entry point when the
-	 * relocation is complete.
+	 * Both restart and cpu_soft_restart will shutdown the MMU, disable data
+	 * caches. However, restart will start new kernel or purgatory directly,
+	 * cpu_soft_restart will transfer control to arm64_relocate_new_kernel
 	 * In kexec case, kimage->start points to purgatory assuming that
 	 * kernel entry and dtb address are embedded in purgatory by
 	 * userspace (kexec-tools).
 	 * In kexec_file case, the kernel starts directly without purgatory.
 	 */
-	cpu_soft_restart(kimage->arch.kern_reloc, kimage->head, kimage->start,
-			 kimage->arch.dtb_mem);
+	if (kimage->head & IND_DONE) {
+		typeof(__cpu_soft_restart) *restart;
+
+		cpu_install_idmap();
+		restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart));
+		restart(is_hyp_nvhe(), kimage->start, kimage->arch.dtb_mem,
+			0, 0);
+	} else {
+		cpu_soft_restart(kimage->arch.kern_reloc, kimage->head,
+				 kimage->start, kimage->arch.dtb_mem);
+	}
 
 	BUG(); /* Should never get here. */
 }
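
Note on the new direct path: with segments in place, machine_kexec() installs
the idmap and enters __cpu_soft_restart through its physical alias, skipping
the relocation trampoline entirely. Assuming the declaration in
arch/arm64/kernel/cpu-reset.h at the time of this series, the arguments map
as annotated below; the comments are an interpretation, not kernel source:

    /*
     * Signature as declared in arch/arm64/kernel/cpu-reset.h; the comments
     * show how machine_kexec() uses each argument in the in-place case.
     */
    void __cpu_soft_restart(unsigned long el2_switch,  /* is_hyp_nvhe() */
                            unsigned long entry,       /* kimage->start */
                            unsigned long arg0,        /* kimage->arch.dtb_mem, lands in x0 */
                            unsigned long arg1,        /* 0 */
                            unsigned long arg2);       /* 0 */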

arch/arm64/kernel/relocate_kernel.S

@@ -32,8 +32,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	mov	x16, x0				/* x16 = kimage_head */
 	mov	x14, xzr			/* x14 = entry ptr */
 	mov	x13, xzr			/* x13 = copy dest */
-	/* Check if the new image needs relocation. */
-	tbnz	x16, IND_DONE_BIT, .Ldone
 	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
 .Lloop:
 	and	x12, x16, PAGE_MASK		/* x12 = addr */
@@ -65,7 +63,6 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 .Lnext:
 	ldr	x16, [x14], #8			/* entry = *ptr++ */
 	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
-.Ldone:
 	/* wait for writes from copy_page to finish */
 	dsb	nsh
 	ic	iallu
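
With the IND_DONE short-circuit moved to C, the assembly loop above can
assume there is always at least one entry to process. For reference, a
self-contained C rendering of the indirection-list walk it implements; the
flag values match include/linux/kexec.h, while the function and the KPAGE_*
constants are illustrative stand-ins:

    #include <stdint.h>
    #include <string.h>

    /* Flag encoding as in include/linux/kexec.h. */
    #define IND_DESTINATION (1UL << 0)  /* entry is the next copy destination */
    #define IND_INDIRECTION (1UL << 1)  /* entry points at the next entry page */
    #define IND_DONE        (1UL << 2)  /* end of list */
    #define IND_SOURCE      (1UL << 3)  /* entry is a page to copy */

    #define KPAGE_SIZE      4096UL
    #define KPAGE_MASK      (~(KPAGE_SIZE - 1))

    /*
     * Illustrative C rendering of the arm64_relocate_new_kernel loop.
     * After this patch the kernel only reaches the loop when head is not
     * IND_DONE, so there is always real work to do.
     */
    static void relocate_new_kernel(uint64_t head)
    {
            uint64_t entry = head;
            uint64_t *ptr = 0;              /* x14: entry ptr */
            unsigned char *dest = 0;        /* x13: copy dest */

            while (!(entry & IND_DONE)) {
                    void *addr = (void *)(entry & KPAGE_MASK);  /* x12 = addr */

                    if (entry & IND_SOURCE) {
                            memcpy(dest, addr, KPAGE_SIZE);     /* copy_page */
                            dest += KPAGE_SIZE;
                    } else if (entry & IND_INDIRECTION) {
                            ptr = addr;                         /* ptr = addr */
                    } else if (entry & IND_DESTINATION) {
                            dest = addr;                        /* dest = addr */
                    }
                    entry = *ptr++;                             /* entry = *ptr++ */
            }
    }

This is also why the decision can be made once in machine_kexec_post_load():
an empty list is known at load time, so neither the trampoline copy nor the
cache maintenance for it is needed.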