x86/asm/64: Add ENDs to some functions and relabel with SYM_CODE_*

All these are functions which are invoked from elsewhere, but they are
not typical C functions, so annotate them using the new SYM_CODE_START.
None of them were balanced by any END annotation, so mark their ends
with SYM_CODE_END too.
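
As an illustrative sketch of the conversion pattern (the label
some_asm_stub below is made up and is not a hunk from this patch):

/* Before: annotated as if it were a C function, with no end marker */
ENTRY(some_asm_stub)
	cli
1:	hlt
	jmp	1b

/* After: marked as non-C code and explicitly terminated */
SYM_CODE_START(some_asm_stub)
	cli
1:	hlt
	jmp	1b
SYM_CODE_END(some_asm_stub)

SYM_CODE_START/SYM_CODE_END are among the new annotation macros
introduced earlier in this series in <linux/linkage.h>.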

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [power mgmt]
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-23-jslaby@suse.cz
Jiri Slaby 2019-10-11 13:51:02 +02:00, committed by Borislav Petkov
Parent f13ad88a98
Commit 4aec216b93
7 changed files with 25 additions and 12 deletions


@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)
 	.code64
 	.org 0x200
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
 	/*
 	 * 64bit entry is 0x200 and it is ABI so immutable!
 	 * We come here either from startup_32 or directly from a
@@ -442,6 +442,7 @@ trampoline_return:
 	 */
 	leaq	.Lrelocated(%rbx), %rax
 	jmp	*%rax
+SYM_CODE_END(startup_64)
 
 #ifdef CONFIG_EFI_STUB
@@ -571,7 +572,7 @@ SYM_FUNC_END(.Lrelocated)
  * ECX contains the base address of the trampoline memory.
  * Non zero RDX means trampoline needs to enable 5-level paging.
  */
-ENTRY(trampoline_32bit_src)
+SYM_CODE_START(trampoline_32bit_src)
 	/* Set up data and stack segments */
 	movl	$__KERNEL_DS, %eax
 	movl	%eax, %ds
@@ -634,6 +635,7 @@ ENTRY(trampoline_32bit_src)
 	movl	%eax, %cr0
 	lret
+SYM_CODE_END(trampoline_32bit_src)
 
 	.code64
 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)


@@ -90,7 +90,7 @@ restore_registers:
 	ret
 
-ENTRY(do_olpc_suspend_lowlevel)
+SYM_CODE_START(do_olpc_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
@@ -110,6 +110,7 @@ ret_point:
 	call	restore_registers
 	call	restore_processor_state
 	ret
+SYM_CODE_END(do_olpc_suspend_lowlevel)
 
 .data
 saved_gdt:	.long	0,0


@@ -52,7 +52,7 @@ ENTRY(swsusp_arch_suspend)
 	ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
 	/* prepare to jump to the image kernel */
 	movq	restore_jump_address(%rip), %r8
 	movq	restore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
 	/* jump to relocated restore code */
 	movq	relocated_restore_code(%rip), %rcx
 	jmpq	*%rcx
+SYM_CODE_END(restore_image)
 
 	/* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
 	/* switch to temporary page tables */
 	movq	%rax, %cr3
 	/* flush TLB */
@@ -97,6 +98,7 @@ ENTRY(core_restore_code)
 .Ldone:
 	/* jump to the restore_registers address from the image header */
 	jmpq	*%r8
+SYM_CODE_END(core_restore_code)
 
 	/* code below belongs to the image kernel */
 	.align PAGE_SIZE


@@ -19,7 +19,7 @@
  */
 	.section ".text32", "ax"
 	.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
 	/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
 	movl	%ecx, %gs
 	movl	%ecx, %ss
 	ljmpw	$8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,


@@ -38,7 +38,7 @@
 	.code16
 
 	.balign	PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
 	cli			# We should be safe anyway
 	wbinvd
@@ -78,12 +78,14 @@ ENTRY(trampoline_start)
 no_longmode:
 	hlt
 	jmp no_longmode
+SYM_CODE_END(trampoline_start)
+
 #include "../kernel/verify_cpu.S"
 
 	.section ".text32","ax"
 	.code32
 	.balign 4
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
 	movl	%edx, %ss
 	addl	$pa_real_mode_base, %esp
 	movl	%edx, %ds
@@ -137,13 +139,15 @@ ENTRY(startup_32)
 	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
 	 */
 	ljmpl	$__KERNEL_CS, $pa_startup_64
+SYM_CODE_END(startup_32)
 
 	.section ".text64","ax"
 	.code64
 	.balign 4
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
 	# Now jump into the kernel using virtual addresses
 	jmpq	*tr_start(%rip)
+SYM_CODE_END(startup_64)
 
 	.section ".rodata","a"
 	# Duplicate the global descriptor table


@@ -37,7 +37,7 @@ SYM_DATA_END(wakeup_header)
 	.code16
 
 	.balign	16
-ENTRY(wakeup_start)
+SYM_CODE_START(wakeup_start)
 	cli
 	cld
@@ -135,6 +135,7 @@ ENTRY(wakeup_start)
 #else
 	jmp	trampoline_start
 #endif
+SYM_CODE_END(wakeup_start)
 
 bogus_real_magic:
 1:


@@ -85,11 +85,12 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
  *	r11		}<-- pushed by hypercall page
  * rsp->rax		}
  */
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
 	pushq $0
 	jmp hypercall_iret
+SYM_CODE_END(xen_iret)
 
-ENTRY(xen_sysret64)
+SYM_CODE_START(xen_sysret64)
 	/*
 	 * We're already on the usermode stack at this point, but
 	 * still with the kernel gs, so we can easily switch back.
@@ -107,6 +108,7 @@ ENTRY(xen_sysret64)
 	pushq $VGCF_in_syscall
 	jmp hypercall_iret
+SYM_CODE_END(xen_sysret64)
 
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which