Merge branch 'x86/asm' into x86/core, to prepare for new patch
Collect all changes to arch/x86/entry/entry_64.S, before applying patch that changes most of the file.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Commit 9dda1658a9
@@ -18,10 +18,10 @@ Some of these entries are:

  - system_call: syscall instruction from 64-bit code.

- - ia32_syscall: int 0x80 from 32-bit or 64-bit code; compat syscall
+ - entry_INT80_compat: int 0x80 from 32-bit or 64-bit code; compat syscall
    either way.

- - ia32_syscall, ia32_sysenter: syscall and sysenter from 32-bit
+ - entry_INT80_compat, ia32_sysenter: syscall and sysenter from 32-bit
    code

  - interrupt: An array of entries. Every IDT vector that doesn't
@@ -10893,7 +10893,7 @@ M: Andy Lutomirski <luto@amacapital.net>
 L: linux-kernel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S: Maintained
-F: arch/x86/vdso/
+F: arch/x86/entry/vdso/

 XC2028/3028 TUNER DRIVER
 M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
@@ -1,3 +1,6 @@
+obj-y += entry/
+
 obj-$(CONFIG_KVM) += kvm/

 # Xen paravirtualization support
@@ -11,7 +14,7 @@ obj-y += kernel/
 obj-y += mm/

 obj-y += crypto/
-obj-y += vdso/

 obj-$(CONFIG_IA32_EMULATION) += ia32/

 obj-y += platform/
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp

-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)

-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)

 LDFLAGS := -m elf_$(UTS_MACHINE)
@@ -187,7 +181,7 @@ archscripts: scripts_basic
 # Syscall table generation

 archheaders:
-	$(Q)$(MAKE) $(build)=arch/x86/syscalls all
+	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all

 archprepare:
 ifeq ($(CONFIG_KEXEC_FILE),y)
@@ -250,7 +244,7 @@ install:

 PHONY += vdso_install
 vdso_install:
-	$(Q)$(MAKE) $(build)=arch/x86/vdso $@
+	$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@

 archclean:
 	$(Q)rm -rf $(objtree)/arch/i386
@@ -0,0 +1,10 @@
+#
+# Makefile for the x86 low level entry code
+#
+obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+
+obj-y += vdso/
+obj-y += vsyscall/
+
+obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
+
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with

 */

-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64

 /*
@@ -91,28 +89,27 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS 21*8

 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
-subq $15*8+\addskip, %rsp
-CFI_ADJUST_CFA_OFFSET 15*8+\addskip
+addq $-(15*8+\addskip), %rsp
 .endm

 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 .if \r11
-movq_cfi r11, 6*8+\offset
+movq %r11, 6*8+\offset(%rsp)
 .endif
 .if \r8910
-movq_cfi r10, 7*8+\offset
-movq_cfi r9, 8*8+\offset
-movq_cfi r8, 9*8+\offset
+movq %r10, 7*8+\offset(%rsp)
+movq %r9, 8*8+\offset(%rsp)
+movq %r8, 9*8+\offset(%rsp)
 .endif
 .if \rax
-movq_cfi rax, 10*8+\offset
+movq %rax, 10*8+\offset(%rsp)
 .endif
 .if \rcx
-movq_cfi rcx, 11*8+\offset
+movq %rcx, 11*8+\offset(%rsp)
 .endif
-movq_cfi rdx, 12*8+\offset
-movq_cfi rsi, 13*8+\offset
-movq_cfi rdi, 14*8+\offset
+movq %rdx, 12*8+\offset(%rsp)
+movq %rsi, 13*8+\offset(%rsp)
+movq %rdi, 14*8+\offset(%rsp)
 .endm
 .macro SAVE_C_REGS offset=0
 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 .endm

 .macro SAVE_EXTRA_REGS offset=0
-movq_cfi r15, 0*8+\offset
-movq_cfi r14, 1*8+\offset
-movq_cfi r13, 2*8+\offset
-movq_cfi r12, 3*8+\offset
-movq_cfi rbp, 4*8+\offset
-movq_cfi rbx, 5*8+\offset
+movq %r15, 0*8+\offset(%rsp)
+movq %r14, 1*8+\offset(%rsp)
+movq %r13, 2*8+\offset(%rsp)
+movq %r12, 3*8+\offset(%rsp)
+movq %rbp, 4*8+\offset(%rsp)
+movq %rbx, 5*8+\offset(%rsp)
 .endm
 .macro SAVE_EXTRA_REGS_RBP offset=0
-movq_cfi rbp, 4*8+\offset
+movq %rbp, 4*8+\offset(%rsp)
 .endm

 .macro RESTORE_EXTRA_REGS offset=0
-movq_cfi_restore 0*8+\offset, r15
-movq_cfi_restore 1*8+\offset, r14
-movq_cfi_restore 2*8+\offset, r13
-movq_cfi_restore 3*8+\offset, r12
-movq_cfi_restore 4*8+\offset, rbp
-movq_cfi_restore 5*8+\offset, rbx
+movq 0*8+\offset(%rsp), %r15
+movq 1*8+\offset(%rsp), %r14
+movq 2*8+\offset(%rsp), %r13
+movq 3*8+\offset(%rsp), %r12
+movq 4*8+\offset(%rsp), %rbp
+movq 5*8+\offset(%rsp), %rbx
 .endm

 .macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with

 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 .if \rstor_r11
-movq_cfi_restore 6*8, r11
+movq 6*8(%rsp), %r11
 .endif
 .if \rstor_r8910
-movq_cfi_restore 7*8, r10
-movq_cfi_restore 8*8, r9
-movq_cfi_restore 9*8, r8
+movq 7*8(%rsp), %r10
+movq 8*8(%rsp), %r9
+movq 9*8(%rsp), %r8
 .endif
 .if \rstor_rax
-movq_cfi_restore 10*8, rax
+movq 10*8(%rsp), %rax
 .endif
 .if \rstor_rcx
-movq_cfi_restore 11*8, rcx
+movq 11*8(%rsp), %rcx
 .endif
 .if \rstor_rdx
-movq_cfi_restore 12*8, rdx
+movq 12*8(%rsp), %rdx
 .endif
-movq_cfi_restore 13*8, rsi
-movq_cfi_restore 14*8, rdi
+movq 13*8(%rsp), %rsi
+movq 14*8(%rsp), %rdi
 .endm
 .macro RESTORE_C_REGS
 RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -204,8 +201,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm

 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
-addq $15*8+\addskip, %rsp
-CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
+subq $-(15*8+\addskip), %rsp
 .endm

 .macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
 */

 .macro SAVE_ALL
-pushl_cfi_reg eax
-pushl_cfi_reg ebp
-pushl_cfi_reg edi
-pushl_cfi_reg esi
-pushl_cfi_reg edx
-pushl_cfi_reg ecx
-pushl_cfi_reg ebx
+pushl %eax
+pushl %ebp
+pushl %edi
+pushl %esi
+pushl %edx
+pushl %ecx
+pushl %ebx
 .endm

 .macro RESTORE_ALL
-popl_cfi_reg ebx
-popl_cfi_reg ecx
-popl_cfi_reg edx
-popl_cfi_reg esi
-popl_cfi_reg edi
-popl_cfi_reg ebp
-popl_cfi_reg eax
+popl %ebx
+popl %ecx
+popl %edx
+popl %esi
+popl %edi
+popl %ebp
+popl %eax
 .endm

 #endif /* CONFIG_X86_64 */
File diff not shown because of its large size
@@ -19,8 +19,6 @@
 * at the top of the kernel process stack.
 *
 * Some macro usage:
-* - CFI macros are used to generate dwarf2 unwind information for better
-* backtraces. They don't change any code.
 * - ENTRY/END Define functions in the symbol table.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - idtentry - Define exception entry points.
@@ -30,8 +28,7 @@
 #include <asm/segment.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
+#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/msr.h>
 #include <asm/unistd.h>
@@ -112,61 +109,6 @@ ENDPROC(native_usergs_sysret64)
 # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
 #endif

-/*
-* empty frame
-*/
-.macro EMPTY_FRAME start=1 offset=0
-.if \start
-CFI_STARTPROC simple
-CFI_SIGNAL_FRAME
-CFI_DEF_CFA rsp,8+\offset
-.else
-CFI_DEF_CFA_OFFSET 8+\offset
-.endif
-.endm
-
-/*
-* initial frame state for interrupts (and exceptions without error code)
-*/
-.macro INTR_FRAME start=1 offset=0
-EMPTY_FRAME \start, 5*8+\offset
-/*CFI_REL_OFFSET ss, 4*8+\offset*/
-CFI_REL_OFFSET rsp, 3*8+\offset
-/*CFI_REL_OFFSET rflags, 2*8+\offset*/
-/*CFI_REL_OFFSET cs, 1*8+\offset*/
-CFI_REL_OFFSET rip, 0*8+\offset
-.endm
-
-/*
-* initial frame state for exceptions with error code (and interrupts
-* with vector already pushed)
-*/
-.macro XCPT_FRAME start=1 offset=0
-INTR_FRAME \start, 1*8+\offset
-.endm
-
-/*
-* frame that enables passing a complete pt_regs to a C function.
-*/
-.macro DEFAULT_FRAME start=1 offset=0
-XCPT_FRAME \start, ORIG_RAX+\offset
-CFI_REL_OFFSET rdi, RDI+\offset
-CFI_REL_OFFSET rsi, RSI+\offset
-CFI_REL_OFFSET rdx, RDX+\offset
-CFI_REL_OFFSET rcx, RCX+\offset
-CFI_REL_OFFSET rax, RAX+\offset
-CFI_REL_OFFSET r8, R8+\offset
-CFI_REL_OFFSET r9, R9+\offset
-CFI_REL_OFFSET r10, R10+\offset
-CFI_REL_OFFSET r11, R11+\offset
-CFI_REL_OFFSET rbx, RBX+\offset
-CFI_REL_OFFSET rbp, RBP+\offset
-CFI_REL_OFFSET r12, R12+\offset
-CFI_REL_OFFSET r13, R13+\offset
-CFI_REL_OFFSET r14, R14+\offset
-CFI_REL_OFFSET r15, R15+\offset
-.endm
-
 /*
 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
@@ -195,13 +137,7 @@ ENDPROC(native_usergs_sysret64)
 * with them due to bugs in both AMD and Intel CPUs.
 */

-ENTRY(system_call)
-CFI_STARTPROC simple
-CFI_SIGNAL_FRAME
-CFI_DEF_CFA rsp,0
-CFI_REGISTER rip,rcx
-/*CFI_REGISTER rflags,r11*/
+ENTRY(entry_SYSCALL_64)
 /*
 * Interrupts are off on entry.
 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -213,14 +149,14 @@ ENTRY(system_call)
 * after the swapgs, so that it can do the swapgs
 * for the guest and jump here on syscall.
 */
-GLOBAL(system_call_after_swapgs)
+GLOBAL(entry_SYSCALL_64_after_swapgs)

 movq %rsp,PER_CPU_VAR(rsp_scratch)
 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp

 /* Construct struct pt_regs on stack */
-pushq_cfi $__USER_DS /* pt_regs->ss */
-pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
+pushq $__USER_DS /* pt_regs->ss */
+pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
 /*
 * Re-enable interrupts.
 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,26 +165,24 @@ GLOBAL(system_call_after_swapgs)
 * with using rsp_scratch:
 */
 ENABLE_INTERRUPTS(CLBR_NONE)
-pushq_cfi %r11 /* pt_regs->flags */
-pushq_cfi $__USER_CS /* pt_regs->cs */
-pushq_cfi %rcx /* pt_regs->ip */
-CFI_REL_OFFSET rip,0
-pushq_cfi_reg rax /* pt_regs->orig_ax */
-pushq_cfi_reg rdi /* pt_regs->di */
-pushq_cfi_reg rsi /* pt_regs->si */
-pushq_cfi_reg rdx /* pt_regs->dx */
-pushq_cfi_reg rcx /* pt_regs->cx */
-pushq_cfi $-ENOSYS /* pt_regs->ax */
-pushq_cfi_reg r8 /* pt_regs->r8 */
-pushq_cfi_reg r9 /* pt_regs->r9 */
-pushq_cfi_reg r10 /* pt_regs->r10 */
-pushq_cfi_reg r11 /* pt_regs->r11 */
+pushq %r11 /* pt_regs->flags */
+pushq $__USER_CS /* pt_regs->cs */
+pushq %rcx /* pt_regs->ip */
+pushq %rax /* pt_regs->orig_ax */
+pushq %rdi /* pt_regs->di */
+pushq %rsi /* pt_regs->si */
+pushq %rdx /* pt_regs->dx */
+pushq %rcx /* pt_regs->cx */
+pushq $-ENOSYS /* pt_regs->ax */
+pushq %r8 /* pt_regs->r8 */
+pushq %r9 /* pt_regs->r9 */
+pushq %r10 /* pt_regs->r10 */
+pushq %r11 /* pt_regs->r11 */
 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
-CFI_ADJUST_CFA_OFFSET 6*8

 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 jnz tracesys
-system_call_fastpath:
+entry_SYSCALL_64_fastpath:
 #if __SYSCALL_MASK == ~0
 cmpq $__NR_syscall_max,%rax
 #else
@@ -282,13 +216,9 @@ system_call_fastpath:
 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */

-CFI_REMEMBER_STATE
-
 RESTORE_C_REGS_EXCEPT_RCX_R11
 movq RIP(%rsp),%rcx
-CFI_REGISTER rip,rcx
 movq EFLAGS(%rsp),%r11
-/*CFI_REGISTER rflags,r11*/
 movq RSP(%rsp),%rsp
 /*
 * 64bit SYSRET restores rip from rcx,
@@ -307,8 +237,6 @@ system_call_fastpath:
 */
 USERGS_SYSRET64

-CFI_RESTORE_STATE
-
 /* Do syscall entry tracing */
 tracesys:
 movq %rsp, %rdi
@@ -318,7 +246,7 @@ tracesys:
 jnz tracesys_phase2 /* if needed, run the slow path */
 RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
 movq ORIG_RAX(%rsp), %rax
-jmp system_call_fastpath /* and return to the fast path */
+jmp entry_SYSCALL_64_fastpath /* and return to the fast path */

 tracesys_phase2:
 SAVE_EXTRA_REGS
@@ -374,9 +302,9 @@ int_careful:
 jnc int_very_careful
 TRACE_IRQS_ON
 ENABLE_INTERRUPTS(CLBR_NONE)
-pushq_cfi %rdi
+pushq %rdi
 SCHEDULE_USER
-popq_cfi %rdi
+popq %rdi
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
 jmp int_with_check
@@ -389,10 +317,10 @@ int_very_careful:
 /* Check for syscall exit trace */
 testl $_TIF_WORK_SYSCALL_EXIT,%edx
 jz int_signal
-pushq_cfi %rdi
+pushq %rdi
 leaq 8(%rsp),%rdi # &ptregs -> arg1
 call syscall_trace_leave
-popq_cfi %rdi
+popq %rdi
 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 jmp int_restore_rest

@@ -475,27 +403,21 @@ syscall_return:
 * perf profiles. Nothing jumps here.
 */
 syscall_return_via_sysret:
-CFI_REMEMBER_STATE
 /* rcx and r11 are already restored (see code above) */
 RESTORE_C_REGS_EXCEPT_RCX_R11
 movq RSP(%rsp),%rsp
 USERGS_SYSRET64
-CFI_RESTORE_STATE

 opportunistic_sysret_failed:
 SWAPGS
 jmp restore_c_regs_and_iret
-CFI_ENDPROC
-END(system_call)
+END(entry_SYSCALL_64)


 .macro FORK_LIKE func
 ENTRY(stub_\func)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8 /* offset 8: return address */
 SAVE_EXTRA_REGS 8
 jmp sys_\func
-CFI_ENDPROC
 END(stub_\func)
 .endm
@@ -504,8 +426,6 @@ END(stub_\func)
 FORK_LIKE vfork

 ENTRY(stub_execve)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 call sys_execve
 return_from_execve:
 testl %eax, %eax
@@ -515,11 +435,9 @@ return_from_execve:
 1:
 /* must use IRET code path (pt_regs->cs may have changed) */
 addq $8, %rsp
-CFI_ADJUST_CFA_OFFSET -8
 ZERO_EXTRA_REGS
 movq %rax,RAX(%rsp)
 jmp int_ret_from_sys_call
-CFI_ENDPROC
 END(stub_execve)
 /*
 * Remaining execve stubs are only 7 bytes long.
@@ -527,32 +445,23 @@ END(stub_execve)
 */
 .align 8
 GLOBAL(stub_execveat)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 call sys_execveat
 jmp return_from_execve
-CFI_ENDPROC
 END(stub_execveat)

 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
 .align 8
 GLOBAL(stub_x32_execve)
+GLOBAL(stub32_execve)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 call compat_sys_execve
 jmp return_from_execve
-CFI_ENDPROC
+END(stub32_execve)
 END(stub_x32_execve)
 .align 8
 GLOBAL(stub_x32_execveat)
+GLOBAL(stub32_execveat)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 call compat_sys_execveat
 jmp return_from_execve
-CFI_ENDPROC
+END(stub32_execveat)
 END(stub_x32_execveat)
 #endif
@@ -562,8 +471,6 @@ END(stub_x32_execveat)
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
 ENTRY(stub_rt_sigreturn)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 /*
 * SAVE_EXTRA_REGS result is not normally needed:
 * sigreturn overwrites all pt_regs->GPREGS.
@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
 call sys_rt_sigreturn
 return_from_stub:
 addq $8, %rsp
-CFI_ADJUST_CFA_OFFSET -8
 RESTORE_EXTRA_REGS
 movq %rax,RAX(%rsp)
 jmp int_ret_from_sys_call
-CFI_ENDPROC
 END(stub_rt_sigreturn)

 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
-CFI_STARTPROC
-DEFAULT_FRAME 0, 8
 SAVE_EXTRA_REGS 8
 call sys32_x32_rt_sigreturn
 jmp return_from_stub
-CFI_ENDPROC
 END(stub_x32_rt_sigreturn)
 #endif
@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
 * rdi: prev task we switched from
 */
 ENTRY(ret_from_fork)
-DEFAULT_FRAME

 LOCK ; btr $TIF_FORK,TI_flags(%r8)

-pushq_cfi $0x0002
-popfq_cfi # reset kernel eflags
+pushq $0x0002
+popfq # reset kernel eflags

 call schedule_tail # rdi: 'prev' task parameter

@@ -615,7 +516,7 @@ ENTRY(ret_from_fork)
 /*
 * By the time we get here, we have no idea whether our pt_regs,
 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
-* the slow path, or one of the ia32entry paths.
+* the slow path, or one of the 32-bit compat paths.
 * Use IRET code path to return, since it can safely handle
 * all of the above.
 */
@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
 movl $0, RAX(%rsp)
 RESTORE_EXTRA_REGS
 jmp int_ret_from_sys_call
-CFI_ENDPROC
 END(ret_from_fork)

 /*
@@ -637,16 +537,13 @@ END(ret_from_fork)
 */
 .align 8
 ENTRY(irq_entries_start)
-INTR_FRAME
 vector=FIRST_EXTERNAL_VECTOR
 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
+pushq $(~vector+0x80) /* Note: always in signed byte range */
 vector=vector+1
 jmp common_interrupt
-CFI_ADJUST_CFA_OFFSET -8
 .align 8
 .endr
-CFI_ENDPROC
 END(irq_entries_start)

 /*
@@ -688,17 +585,7 @@ END(irq_entries_start)
 movq %rsp, %rsi
 incl PER_CPU_VAR(irq_count)
 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-CFI_DEF_CFA_REGISTER rsi
 pushq %rsi
-/*
-* For debugger:
-* "CFA (Current Frame Address) is the value on stack + offset"
-*/
-CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
-0x77 /* DW_OP_breg7 (rsp) */, 0, \
-0x06 /* DW_OP_deref */, \
-0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
-0x22 /* DW_OP_plus */
 /* We entered an interrupt context - irqs are off: */
 TRACE_IRQS_OFF

@@ -711,7 +598,6 @@ END(irq_entries_start)
 */
 .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-XCPT_FRAME
 ASM_CLAC
 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
 interrupt do_IRQ
@@ -723,16 +609,13 @@ ret_from_intr:

 /* Restore saved previous stack */
 popq %rsi
-CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
 /* return code expects complete pt_regs - adjust rsp accordingly: */
 leaq -RBP(%rsi),%rsp
-CFI_DEF_CFA_REGISTER rsp
-CFI_ADJUST_CFA_OFFSET RBP

 testb $3, CS(%rsp)
 jz retint_kernel
 /* Interrupt came from user space */

 retint_user:
 GET_THREAD_INFO(%rcx)
 /*
 * %rcx: thread info. Interrupts off.
@@ -743,7 +626,6 @@ retint_check:
 LOCKDEP_SYS_EXIT_IRQ
 movl TI_flags(%rcx),%edx
 andl %edi,%edx
-CFI_REMEMBER_STATE
 jnz retint_careful

 retint_swapgs: /* return to user-space */
@@ -781,8 +663,6 @@ retint_kernel:
 restore_c_regs_and_iret:
 RESTORE_C_REGS
 REMOVE_PT_GPREGS_FROM_STACK 8
-
-irq_return:
 INTERRUPT_RETURN

 ENTRY(native_iret)
@@ -807,8 +687,8 @@ native_irq_return_iret:

 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
-pushq_cfi %rax
-pushq_cfi %rdi
+pushq %rax
+pushq %rdi
 SWAPGS
 movq PER_CPU_VAR(espfix_waddr),%rdi
 movq %rax,(0*8)(%rdi) /* RAX */
@@ -823,24 +703,23 @@ native_irq_return_ldt:
 movq (5*8)(%rsp),%rax /* RSP */
 movq %rax,(4*8)(%rdi)
 andl $0xffff0000,%eax
-popq_cfi %rdi
+popq %rdi
 orq PER_CPU_VAR(espfix_stack),%rax
 SWAPGS
 movq %rax,%rsp
-popq_cfi %rax
+popq %rax
 jmp native_irq_return_iret
 #endif

 /* edi: workmask, edx: work */
 retint_careful:
-CFI_RESTORE_STATE
 bt $TIF_NEED_RESCHED,%edx
 jnc retint_signal
 TRACE_IRQS_ON
 ENABLE_INTERRUPTS(CLBR_NONE)
-pushq_cfi %rdi
+pushq %rdi
 SCHEDULE_USER
-popq_cfi %rdi
+popq %rdi
 GET_THREAD_INFO(%rcx)
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
@@ -862,7 +741,6 @@ retint_signal:
 GET_THREAD_INFO(%rcx)
 jmp retint_with_reschedule

-CFI_ENDPROC
 END(common_interrupt)

 /*
@@ -870,13 +748,11 @@ END(common_interrupt)
 */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
-INTR_FRAME
 ASM_CLAC
-pushq_cfi $~(\num)
+pushq $~(\num)
 .Lcommon_\sym:
 interrupt \do_sym
 jmp ret_from_intr
-CFI_ENDPROC
 END(\sym)
 .endm

@@ -966,24 +842,17 @@ ENTRY(\sym)
 .error "using shift_ist requires paranoid=1"
 .endif

-.if \has_error_code
-XCPT_FRAME
-.else
-INTR_FRAME
-.endif
-
 ASM_CLAC
 PARAVIRT_ADJUST_EXCEPTION_FRAME

 .ifeq \has_error_code
-pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
+pushq $-1 /* ORIG_RAX: no syscall to restart */
 .endif

 ALLOC_PT_GPREGS_ON_STACK

 .if \paranoid
 .if \paranoid == 1
-CFI_REMEMBER_STATE
 testb $3, CS(%rsp) /* If coming from userspace, switch */
 jnz 1f /* stacks. */
 .endif
@@ -993,8 +862,6 @@ ENTRY(\sym)
 .endif
 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

-DEFAULT_FRAME 0
-
 .if \paranoid
 .if \shift_ist != -1
 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
@@ -1030,7 +897,6 @@ ENTRY(\sym)
 .endif

 .if \paranoid == 1
-CFI_RESTORE_STATE
 /*
 * Paranoid entry from userspace. Switch stacks and treat it
 * as a normal entry. This means that paranoid handlers
@@ -1039,7 +905,6 @@ ENTRY(\sym)
 1:
 call error_entry

-DEFAULT_FRAME 0

 movq %rsp,%rdi /* pt_regs pointer */
 call sync_regs
@@ -1058,8 +923,6 @@ ENTRY(\sym)

 jmp error_exit /* %ebx: no swapgs flag */
 .endif

-CFI_ENDPROC
 END(\sym)
 .endm

@@ -1092,17 +955,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
 /* Reload gs selector with exception handling */
 /* edi: new selector */
 ENTRY(native_load_gs_index)
-CFI_STARTPROC
-pushfq_cfi
+pushfq
 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 SWAPGS
 gs_change:
 movl %edi,%gs
 2: mfence /* workaround */
 SWAPGS
-popfq_cfi
+popfq
 ret
-CFI_ENDPROC
 END(native_load_gs_index)

 _ASM_EXTABLE(gs_change,bad_gs)
@@ -1117,22 +978,15 @@ bad_gs:

 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(do_softirq_own_stack)
-CFI_STARTPROC
-pushq_cfi %rbp
-CFI_REL_OFFSET rbp,0
+pushq %rbp
 mov %rsp,%rbp
-CFI_DEF_CFA_REGISTER rbp
 incl PER_CPU_VAR(irq_count)
 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 push %rbp # backlink for old unwinder
 call __do_softirq
 leaveq
-CFI_RESTORE rbp
-CFI_DEF_CFA_REGISTER rsp
-CFI_ADJUST_CFA_OFFSET -8
 decl PER_CPU_VAR(irq_count)
 ret
-CFI_ENDPROC
 END(do_softirq_own_stack)

 #ifdef CONFIG_XEN
@@ -1152,28 +1006,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
 * activation and restart the handler using the previous one.
 */
 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
-CFI_STARTPROC
 /*
 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
 movq %rdi, %rsp # we don't return, adjust the stack frame
-CFI_ENDPROC
-DEFAULT_FRAME
 11: incl PER_CPU_VAR(irq_count)
 movq %rsp,%rbp
-CFI_DEF_CFA_REGISTER rbp
 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 pushq %rbp # backlink for old unwinder
 call xen_evtchn_do_upcall
 popq %rsp
-CFI_DEF_CFA_REGISTER rsp
 decl PER_CPU_VAR(irq_count)
 #ifndef CONFIG_PREEMPT
 call xen_maybe_preempt_hcall
 #endif
 jmp error_exit
-CFI_ENDPROC
 END(xen_do_hypervisor_callback)

 /*
@@ -1190,16 +1038,8 @@ END(xen_do_hypervisor_callback)
 * with its current contents: any discrepancy means we in category 1.
 */
 ENTRY(xen_failsafe_callback)
-INTR_FRAME 1 (6*8)
-/*CFI_REL_OFFSET gs,GS*/
-/*CFI_REL_OFFSET fs,FS*/
-/*CFI_REL_OFFSET es,ES*/
-/*CFI_REL_OFFSET ds,DS*/
-CFI_REL_OFFSET r11,8
-CFI_REL_OFFSET rcx,0
 movl %ds,%ecx
 cmpw %cx,0x10(%rsp)
-CFI_REMEMBER_STATE
 jne 1f
 movl %es,%ecx
 cmpw %cx,0x18(%rsp)
@@ -1212,29 +1052,21 @@ ENTRY(xen_failsafe_callback)
 jne 1f
 /* All segments match their saved values => Category 2 (Bad IRET). */
 movq (%rsp),%rcx
-CFI_RESTORE rcx
 movq 8(%rsp),%r11
-CFI_RESTORE r11
 addq $0x30,%rsp
-CFI_ADJUST_CFA_OFFSET -0x30
-pushq_cfi $0 /* RIP */
-pushq_cfi %r11
-pushq_cfi %rcx
+pushq $0 /* RIP */
+pushq %r11
+pushq %rcx
 jmp general_protection
-CFI_RESTORE_STATE
 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 movq (%rsp),%rcx
-CFI_RESTORE rcx
 movq 8(%rsp),%r11
-CFI_RESTORE r11
 addq $0x30,%rsp
-CFI_ADJUST_CFA_OFFSET -0x30
-pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+pushq $-1 /* orig_ax = -1 => not a system call */
 ALLOC_PT_GPREGS_ON_STACK
 SAVE_C_REGS
 SAVE_EXTRA_REGS
 jmp error_exit
-CFI_ENDPROC
 END(xen_failsafe_callback)

 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1270,7 +1102,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
 ENTRY(paranoid_entry)
-XCPT_FRAME 1 15*8
 cld
 SAVE_C_REGS 8
 SAVE_EXTRA_REGS 8
@@ -1282,7 +1113,6 @@ ENTRY(paranoid_entry)
 SWAPGS
 xorl %ebx,%ebx
 1: ret
-CFI_ENDPROC
 END(paranoid_entry)

 /*
@@ -1297,7 +1127,6 @@ END(paranoid_entry)
 */
 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(paranoid_exit)
-DEFAULT_FRAME
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF_DEBUG
 testl %ebx,%ebx /* swapgs needed? */
@@ -1312,7 +1141,6 @@ paranoid_exit_restore:
 RESTORE_C_REGS
 REMOVE_PT_GPREGS_FROM_STACK 8
 INTERRUPT_RETURN
-CFI_ENDPROC
 END(paranoid_exit)

 /*
@@ -1320,7 +1148,6 @@ END(paranoid_exit)
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
 ENTRY(error_entry)
-XCPT_FRAME 1 15*8
 cld
 SAVE_C_REGS 8
 SAVE_EXTRA_REGS 8
@@ -1340,7 +1167,6 @@ error_sti:
 * for these here too.
 */
 error_kernelspace:
-CFI_REL_OFFSET rcx, RCX+8
 incl %ebx
 leaq native_irq_return_iret(%rip),%rcx
 cmpq %rcx,RIP+8(%rsp)
@@ -1364,32 +1190,22 @@ error_bad_iret:
 mov %rax,%rsp
 decl %ebx /* Return to usergs */
 jmp error_sti
-CFI_ENDPROC
 END(error_entry)


 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(error_exit)
-DEFAULT_FRAME
 movl %ebx,%eax
 RESTORE_EXTRA_REGS
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
-GET_THREAD_INFO(%rcx)
 testl %eax,%eax
 jnz retint_kernel
-LOCKDEP_SYS_EXIT_IRQ
-movl TI_flags(%rcx),%edx
-movl $_TIF_WORK_MASK,%edi
-andl %edi,%edx
-jnz retint_careful
-jmp retint_swapgs
-CFI_ENDPROC
+jmp retint_user
 END(error_exit)

 /* Runs on exception stack */
 ENTRY(nmi)
-INTR_FRAME
 PARAVIRT_ADJUST_EXCEPTION_FRAME
 /*
 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1424,8 +1240,7 @@ ENTRY(nmi)
 */

 /* Use %rdx as our temp variable throughout */
-pushq_cfi %rdx
-CFI_REL_OFFSET rdx, 0
+pushq %rdx

 /*
 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1459,8 +1274,6 @@ ENTRY(nmi)
 jb first_nmi
 /* Ah, it is within the NMI stack, treat it as nested */

-CFI_REMEMBER_STATE
-
 nested_nmi:
 /*
 * Do nothing if we interrupted the fixup in repeat_nmi.
@@ -1478,26 +1291,22 @@ nested_nmi:
 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
 leaq -1*8(%rsp), %rdx
 movq %rdx, %rsp
-CFI_ADJUST_CFA_OFFSET 1*8
 leaq -10*8(%rsp), %rdx
-pushq_cfi $__KERNEL_DS
-pushq_cfi %rdx
-pushfq_cfi
-pushq_cfi $__KERNEL_CS
-pushq_cfi $repeat_nmi
+pushq $__KERNEL_DS
+pushq %rdx
+pushfq
+pushq $__KERNEL_CS
+pushq $repeat_nmi

 /* Put stack back */
 addq $(6*8), %rsp
-CFI_ADJUST_CFA_OFFSET -6*8

 nested_nmi_out:
-popq_cfi %rdx
-CFI_RESTORE rdx
+popq %rdx

 /* No need to check faults here */
 INTERRUPT_RETURN

-CFI_RESTORE_STATE
 first_nmi:
 /*
 * Because nested NMIs will use the pushed location that we
@@ -1536,22 +1345,19 @@ first_nmi:
 */
 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
 movq (%rsp), %rdx
-CFI_RESTORE rdx

 /* Set the NMI executing variable on the stack. */
-pushq_cfi $1
+pushq $1

 /*
 * Leave room for the "copied" frame
 */
 subq $(5*8), %rsp
-CFI_ADJUST_CFA_OFFSET 5*8

 /* Copy the stack frame to the Saved frame */
 .rept 5
-pushq_cfi 11*8(%rsp)
+pushq 11*8(%rsp)
 .endr
-CFI_DEF_CFA_OFFSET 5*8

 /* Everything up to here is safe from nested NMIs */

@@ -1574,12 +1380,10 @@ repeat_nmi:

 /* Make another copy, this one may be modified by nested NMIs */
 addq $(10*8), %rsp
-CFI_ADJUST_CFA_OFFSET -10*8
 .rept 5
-pushq_cfi -6*8(%rsp)
+pushq -6*8(%rsp)
 .endr
 subq $(5*8), %rsp
-CFI_DEF_CFA_OFFSET 5*8
 end_repeat_nmi:

 /*
@@ -1587,7 +1391,7 @@ end_repeat_nmi:
 * NMI if the first NMI took an exception and reset our iret stack
 * so that we repeat another NMI.
 */
-pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
+pushq $-1 /* ORIG_RAX: no syscall to restart */
 ALLOC_PT_GPREGS_ON_STACK

 /*
@@ -1598,7 +1402,6 @@ end_repeat_nmi:
 * exceptions might do.
 */
 call paranoid_entry
-DEFAULT_FRAME 0

 /*
 * Save off the CR2 register. If we take a page fault in the NMI then
@@ -1634,14 +1437,11 @@ nmi_restore:

 /* Clear the NMI executing stack variable */
 movq $0, 5*8(%rsp)
-jmp irq_return
-CFI_ENDPROC
+INTERRUPT_RETURN
 END(nmi)

 ENTRY(ignore_sysret)
-CFI_STARTPROC
 mov $-ENOSYS,%eax
 sysret
-CFI_ENDPROC
 END(ignore_sysret)
@@ -0,0 +1,547 @@
+/*
+ * Compatibility mode system call entry point for x86-64.
+ *
+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
+ */
+#include "calling.h"
+#include <asm/asm-offsets.h>
+#include <asm/current.h>
+#include <asm/errno.h>
+#include <asm/ia32_unistd.h>
+#include <asm/thread_info.h>
+#include <asm/segment.h>
+#include <asm/irqflags.h>
+#include <asm/asm.h>
+#include <asm/smap.h>
+#include <linux/linkage.h>
+#include <linux/err.h>
+
+/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_LE 0x40000000
+
+#ifndef CONFIG_AUDITSYSCALL
+# define sysexit_audit ia32_ret_from_sys_call
+# define sysretl_audit ia32_ret_from_sys_call
+#endif
+
+.section .entry.text, "ax"
+
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_usergs_sysret32)
+swapgs
+sysretl
+ENDPROC(native_usergs_sysret32)
+#endif
+
+/*
+ * 32-bit SYSENTER instruction entry.
+ *
+ * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
+ * IF and VM in rflags are cleared (IOW: interrupts are off).
+ * SYSENTER does not save anything on the stack,
+ * and does not save old rip (!!!) and rflags.
+ *
+ * Arguments:
+ * eax system call number
+ * ebx arg1
+ * ecx arg2
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * ebp user stack
+ * 0(%ebp) arg6
+ *
+ * This is purely a fast path. For anything complicated we use the int 0x80
+ * path below. We set up a complete hardware stack frame to share code
+ * with the int 0x80 path.
+ */
+ENTRY(entry_SYSENTER_compat)
+/*
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
+SWAPGS_UNSAFE_STACK
+movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ENABLE_INTERRUPTS(CLBR_NONE)
+
+/* Zero-extending 32-bit regs, do not remove */
+movl %ebp, %ebp
+movl %eax, %eax
+
+movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
+
+/* Construct struct pt_regs on stack */
+pushq $__USER32_DS /* pt_regs->ss */
+pushq %rbp /* pt_regs->sp */
+pushfq /* pt_regs->flags */
+pushq $__USER32_CS /* pt_regs->cs */
+pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
+pushq %rax /* pt_regs->orig_ax */
+pushq %rdi /* pt_regs->di */
+pushq %rsi /* pt_regs->si */
+pushq %rdx /* pt_regs->dx */
+pushq %rcx /* pt_regs->cx */
+pushq $-ENOSYS /* pt_regs->ax */
+cld
+sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
+/*
+ * no need to do an access_ok check here because rbp has been
+ * 32-bit zero extended
+ */
+ASM_STAC
+1: movl (%rbp), %ebp
+_ASM_EXTABLE(1b, ia32_badarg)
+ASM_CLAC
+
+/*
+ * Sysenter doesn't filter flags, so we need to clear NT
+ * ourselves. To save a few cycles, we can check whether
+ * NT was set instead of doing an unconditional popfq.
+ */
+testl $X86_EFLAGS_NT, EFLAGS(%rsp)
+jnz sysenter_fix_flags
+sysenter_flags_fixed:
+
+orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz sysenter_tracesys
+
+sysenter_do_call:
+/* 32-bit syscall -> 64-bit C ABI argument conversion */
+movl %edi, %r8d /* arg5 */
+movl %ebp, %r9d /* arg6 */
+xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
+movl %ebx, %edi /* arg1 */
+movl %edx, %edx /* arg3 (zero extension) */
+sysenter_dispatch:
+cmpq $(IA32_NR_syscalls-1), %rax
+ja 1f
+call *ia32_sys_call_table(, %rax, 8)
+movq %rax, RAX(%rsp)
+1:
+DISABLE_INTERRUPTS(CLBR_NONE)
+TRACE_IRQS_OFF
+testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz sysexit_audit
+sysexit_from_sys_call:
+/*
+ * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
+ * NMI between STI and SYSEXIT has poorly specified behavior,
+ * and and NMI followed by an IRQ with usergs is fatal. So
+ * we just pretend we're using SYSEXIT but we really use
+ * SYSRETL instead.
+ *
+ * This code path is still called 'sysexit' because it pairs
+ * with 'sysenter' and it uses the SYSENTER calling convention.
+ */
+andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+movl RIP(%rsp), %ecx /* User %eip */
+RESTORE_RSI_RDI
+xorl %edx, %edx /* Do not leak kernel information */
+xorq %r8, %r8
+xorq %r9, %r9
+xorq %r10, %r10
+movl EFLAGS(%rsp), %r11d /* User eflags */
+TRACE_IRQS_ON
+
+/*
+ * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
+ * since it avoids a dicey window with interrupts enabled.
+ */
+movl RSP(%rsp), %esp
+
+/*
+ * USERGS_SYSRET32 does:
+ * gsbase = user's gs base
+ * eip = ecx
+ * rflags = r11
+ * cs = __USER32_CS
+ * ss = __USER_DS
+ *
+ * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
+ *
+ * pop %ebp
+ * pop %edx
+ * pop %ecx
+ *
+ * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
+ * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
+ * address (already known to user code), and R12-R15 are
+ * callee-saved and therefore don't contain any interesting
+ * kernel data.
+ */
+USERGS_SYSRET32
+
+#ifdef CONFIG_AUDITSYSCALL
+.macro auditsys_entry_common
+movl %esi, %r8d /* 5th arg: 4th syscall arg */
+movl %ecx, %r9d /* swap with edx */
+movl %edx, %ecx /* 4th arg: 3rd syscall arg */
+movl %r9d, %edx /* 3rd arg: 2nd syscall arg */
+movl %ebx, %esi /* 2nd arg: 1st syscall arg */
+movl %eax, %edi /* 1st arg: syscall number */
+call __audit_syscall_entry
+movl ORIG_RAX(%rsp), %eax /* reload syscall number */
+movl %ebx, %edi /* reload 1st syscall arg */
+movl RCX(%rsp), %esi /* reload 2nd syscall arg */
+movl RDX(%rsp), %edx /* reload 3rd syscall arg */
+movl RSI(%rsp), %ecx /* reload 4th syscall arg */
+movl RDI(%rsp), %r8d /* reload 5th syscall arg */
+.endm
+
+.macro auditsys_exit exit
+testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz ia32_ret_from_sys_call
+TRACE_IRQS_ON
+ENABLE_INTERRUPTS(CLBR_NONE)
+movl %eax, %esi /* second arg, syscall return value */
+cmpl $-MAX_ERRNO, %eax /* is it an error ? */
+jbe 1f
+movslq %eax, %rsi /* if error sign extend to 64 bits */
+1: setbe %al /* 1 if error, 0 if not */
+movzbl %al, %edi /* zero-extend that into %edi */
+call __audit_syscall_exit
+movq RAX(%rsp), %rax /* reload syscall return value */
+movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
+DISABLE_INTERRUPTS(CLBR_NONE)
+TRACE_IRQS_OFF
+testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jz \exit
+xorl %eax, %eax /* Do not leak kernel information */
+movq %rax, R11(%rsp)
+movq %rax, R10(%rsp)
+movq %rax, R9(%rsp)
+movq %rax, R8(%rsp)
+jmp int_with_check
+.endm
+
+sysenter_auditsys:
+auditsys_entry_common
+movl %ebp, %r9d /* reload 6th syscall arg */
+jmp sysenter_dispatch
+
+sysexit_audit:
+auditsys_exit sysexit_from_sys_call
+#endif
+
+sysenter_fix_flags:
+pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+popfq
+jmp sysenter_flags_fixed
+
+sysenter_tracesys:
+#ifdef CONFIG_AUDITSYSCALL
+testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jz sysenter_auditsys
+#endif
+SAVE_EXTRA_REGS
+xorl %eax, %eax /* Do not leak kernel information */
+movq %rax, R11(%rsp)
+movq %rax, R10(%rsp)
+movq %rax, R9(%rsp)
+movq %rax, R8(%rsp)
+movq %rsp, %rdi /* &pt_regs -> arg1 */
+call syscall_trace_enter
+
+/* Reload arg registers from stack. (see sysenter_tracesys) */
+movl RCX(%rsp), %ecx
+movl RDX(%rsp), %edx
+movl RSI(%rsp), %esi
+movl RDI(%rsp), %edi
+movl %eax, %eax /* zero extension */
+
+RESTORE_EXTRA_REGS
+jmp sysenter_do_call
+ENDPROC(entry_SYSENTER_compat)
+
+/*
+ * 32-bit SYSCALL instruction entry.
+ *
+ * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
+ * then loads new ss, cs, and rip from previously programmed MSRs.
+ * rflags gets masked by a value from another MSR (so CLD and CLAC
+ * are not needed). SYSCALL does not save anything on the stack
+ * and does not change rsp.
+ *
+ * Note: rflags saving+masking-with-MSR happens only in Long mode
+ * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
+ * Don't get confused: rflags saving+masking depends on Long Mode Active bit
+ * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
+ * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
+ *
+ * Arguments:
+ * eax system call number
+ * ecx return address
+ * ebx arg1
+ * ebp arg2 (note: not saved in the stack frame, should not be touched)
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * esp user stack
+ * 0(%esp) arg6
+ *
+ * This is purely a fast path. For anything complicated we use the int 0x80
+ * path below. We set up a complete hardware stack frame to share code
+ * with the int 0x80 path.
+ */
+ENTRY(entry_SYSCALL_compat)
+/*
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
+SWAPGS_UNSAFE_STACK
+movl %esp, %r8d
+movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ENABLE_INTERRUPTS(CLBR_NONE)
+
+/* Zero-extending 32-bit regs, do not remove */
+movl %eax, %eax
+
+/* Construct struct pt_regs on stack */
+pushq $__USER32_DS /* pt_regs->ss */
+pushq %r8 /* pt_regs->sp */
+pushq %r11 /* pt_regs->flags */
+pushq $__USER32_CS /* pt_regs->cs */
+pushq %rcx /* pt_regs->ip */
+pushq %rax /* pt_regs->orig_ax */
+pushq %rdi /* pt_regs->di */
+pushq %rsi /* pt_regs->si */
+pushq %rdx /* pt_regs->dx */
+pushq %rbp /* pt_regs->cx */
+movl %ebp, %ecx
+pushq $-ENOSYS /* pt_regs->ax */
+sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
+/*
+ * No need to do an access_ok check here because r8 has been
+ * 32-bit zero extended:
+ */
+ASM_STAC
+1: movl (%r8), %ebp
+_ASM_EXTABLE(1b, ia32_badarg)
+ASM_CLAC
+orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz cstar_tracesys
+
+cstar_do_call:
+/* 32-bit syscall -> 64-bit C ABI argument conversion */
+movl %edi, %r8d /* arg5 */
+movl %ebp, %r9d /* arg6 */
+xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
+movl %ebx, %edi /* arg1 */
+movl %edx, %edx /* arg3 (zero extension) */
+
+cstar_dispatch:
+cmpq $(IA32_NR_syscalls-1), %rax
+ja 1f
+
+call *ia32_sys_call_table(, %rax, 8)
+movq %rax, RAX(%rsp)
+1:
+DISABLE_INTERRUPTS(CLBR_NONE)
+TRACE_IRQS_OFF
+testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz sysretl_audit
+
+sysretl_from_sys_call:
+andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+movl RCX(%rsp), %ebp
+RESTORE_RSI_RDI_RDX
+movl RIP(%rsp), %ecx
+movl EFLAGS(%rsp), %r11d
+xorq %r10, %r10
+xorq %r9, %r9
+xorq %r8, %r8
+TRACE_IRQS_ON
+movl RSP(%rsp), %esp
+/*
+ * 64-bit->32-bit SYSRET restores eip from ecx,
+ * eflags from r11 (but RF and VM bits are forced to 0),
+ * cs and ss are loaded from MSRs.
+ * (Note: 32-bit->32-bit SYSRET is different: since r11
+ * does not exist, it merely sets eflags.IF=1).
+ *
+ * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
+ * descriptor is not reinitialized. This means that we must
+ * avoid SYSRET with SS == NULL, which could happen if we schedule,
+ * exit the kernel, and re-enter using an interrupt vector. (All
+ * interrupt entries on x86_64 set SS to NULL.) We prevent that
+ * from happening by reloading SS in __switch_to.
+ */
+USERGS_SYSRET32
+
+#ifdef CONFIG_AUDITSYSCALL
+cstar_auditsys:
+auditsys_entry_common
+movl %ebp, %r9d /* reload 6th syscall arg */
+jmp cstar_dispatch
+
+sysretl_audit:
+auditsys_exit sysretl_from_sys_call
+#endif
+
+cstar_tracesys:
+#ifdef CONFIG_AUDITSYSCALL
+testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jz cstar_auditsys
+#endif
+SAVE_EXTRA_REGS
+xorl %eax, %eax /* Do not leak kernel information */
+movq %rax, R11(%rsp)
+movq %rax, R10(%rsp)
+movq %rax, R9(%rsp)
+movq %rax, R8(%rsp)
+movq %rsp, %rdi /* &pt_regs -> arg1 */
+call syscall_trace_enter
+
+/* Reload arg registers from stack. (see sysenter_tracesys) */
+movl RCX(%rsp), %ecx
+movl RDX(%rsp), %edx
+movl RSI(%rsp), %esi
+movl RDI(%rsp), %edi
+movl %eax, %eax /* zero extension */
+
+RESTORE_EXTRA_REGS
+jmp cstar_do_call
+END(entry_SYSCALL_compat)
+
+ia32_badarg:
+ASM_CLAC
+movq $-EFAULT, %rax
+jmp ia32_sysret
+
+ia32_ret_from_sys_call:
+xorl %eax, %eax /* Do not leak kernel information */
+movq %rax, R11(%rsp)
+movq %rax, R10(%rsp)
+movq %rax, R9(%rsp)
+movq %rax, R8(%rsp)
+jmp int_ret_from_sys_call
+
+/*
+ * Emulated IA32 system calls via int 0x80.
+ *
+ * Arguments:
+ * eax system call number
+ * ebx arg1
+ * ecx arg2
+ * edx arg3
+ * esi arg4
+ * edi arg5
+ * ebp arg6 (note: not saved in the stack frame, should not be touched)
+ *
+ * Notes:
+ * Uses the same stack frame as the x86-64 version.
+ * All registers except eax must be saved (but ptrace may violate that).
+ * Arguments are zero extended. For system calls that want sign extension and
+ * take long arguments a wrapper is needed. Most calls can just be called
+ * directly.
+ * Assumes it is only called from user space and entered with interrupts off.
+ */
+
+ENTRY(entry_INT80_compat)
+/*
+ * Interrupts are off on entry.
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
+PARAVIRT_ADJUST_EXCEPTION_FRAME
+SWAPGS
+ENABLE_INTERRUPTS(CLBR_NONE)
+
+/* Zero-extending 32-bit regs, do not remove */
+movl %eax, %eax
+
+/* Construct struct pt_regs on stack (iret frame is already on stack) */
+pushq %rax /* pt_regs->orig_ax */
+pushq %rdi /* pt_regs->di */
+pushq %rsi /* pt_regs->si */
+pushq %rdx /* pt_regs->dx */
+pushq %rcx /* pt_regs->cx */
+pushq $-ENOSYS /* pt_regs->ax */
+pushq $0 /* pt_regs->r8 */
+pushq $0 /* pt_regs->r9 */
+pushq $0 /* pt_regs->r10 */
+pushq $0 /* pt_regs->r11 */
+cld
+sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+
+orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+jnz ia32_tracesys
+
+ia32_do_call:
+/* 32-bit syscall -> 64-bit C ABI argument conversion */
+movl %edi, %r8d /* arg5 */
+movl %ebp, %r9d /* arg6 */
+xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
+movl %ebx, %edi /* arg1 */
+movl %edx, %edx /* arg3 (zero extension) */
+cmpq $(IA32_NR_syscalls-1), %rax
+ja 1f
+
+call *ia32_sys_call_table(, %rax, 8) /* RIP relative */
+
+ia32_sysret:
+movq %rax, RAX(%rsp)
+1:
+jmp int_ret_from_sys_call
+
+ia32_tracesys:
+SAVE_EXTRA_REGS
+movq %rsp, %rdi /* &pt_regs -> arg1 */
+call syscall_trace_enter
+/*
+ * Reload arg registers from stack in case ptrace changed them.
+ * Don't reload %eax because syscall_trace_enter() returned
+ * the %rax value we should see. But do truncate it to 32 bits.
+ * If it's -1 to make us punt the syscall, then (u32)-1 is still
+ * an appropriately invalid value.
+ */
+movl RCX(%rsp), %ecx
+movl RDX(%rsp), %edx
+movl RSI(%rsp), %esi
+movl RDI(%rsp), %edi
+movl %eax, %eax /* zero extension */
+RESTORE_EXTRA_REGS
+jmp ia32_do_call
+END(entry_INT80_compat)
+
+.macro PTREGSCALL label, func
+ALIGN
+GLOBAL(\label)
+leaq \func(%rip), %rax
+jmp ia32_ptregs_common
+.endm
+
+PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
+PTREGSCALL stub32_sigreturn, sys32_sigreturn
+PTREGSCALL stub32_fork, sys_fork
+PTREGSCALL stub32_vfork, sys_vfork
+
+ALIGN
+GLOBAL(stub32_clone)
+leaq sys_clone(%rip), %rax
+/*
+ * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
+ * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
+ *
+ * The native 64-bit kernel's sys_clone() implements the latter,
+ * so we need to swap arguments here before calling it:
+ */
+xchg %r8, %rcx
+jmp ia32_ptregs_common
+
+ALIGN
+ia32_ptregs_common:
+SAVE_EXTRA_REGS 8
+call *%rax
+RESTORE_EXTRA_REGS 8
+ret
+END(ia32_ptregs_common)
@@ -10,7 +10,7 @@
 #else
 #define SYM(sym, compat) sym
 #define ia32_sys_call_table sys_call_table
-#define __NR_ia32_syscall_max __NR_syscall_max
+#define __NR_entry_INT80_compat_max __NR_syscall_max
 #endif

 #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
@@ -23,11 +23,11 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);

 extern asmlinkage void sys_ni_syscall(void);

-__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
+__visible const sys_call_ptr_t ia32_sys_call_table[__NR_entry_INT80_compat_max+1] = {
 /*
 * Smells like a compiler bug -- it doesn't work
 * when the & below is removed.
 */
-[0 ... __NR_ia32_syscall_max] = &sys_ni_syscall,
+[0 ... __NR_entry_INT80_compat_max] = &sys_ni_syscall,
 #include <asm/syscalls_32.h>
 };
@@ -1,5 +1,5 @@
-out := $(obj)/../include/generated/asm
-uapi := $(obj)/../include/generated/uapi/asm
+out := $(obj)/../../include/generated/asm
+uapi := $(obj)/../../include/generated/uapi/asm

 # Create output directory if not already present
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
@@ -6,16 +6,14 @@
 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm/dwarf2.h>

 /* put return address in eax (arg1) */
 .macro THUNK name, func, put_ret_addr_in_eax=0
 .globl \name
 \name:
-CFI_STARTPROC
-pushl_cfi_reg eax
-pushl_cfi_reg ecx
-pushl_cfi_reg edx
+pushl %eax
+pushl %ecx
+pushl %edx

 .if \put_ret_addr_in_eax
 /* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
 .endif

 call \func
-popl_cfi_reg edx
-popl_cfi_reg ecx
-popl_cfi_reg eax
+popl %edx
+popl %ecx
+popl %eax
 ret
-CFI_ENDPROC
 _ASM_NOKPROBE(\name)
 .endm

@@ -6,35 +6,32 @@
 * Subject to the GNU public license, v.2. No warranty of any kind.
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
+#include "calling.h"
 #include <asm/asm.h>

 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 .macro THUNK name, func, put_ret_addr_in_rdi=0
 .globl \name
 \name:
-CFI_STARTPROC

 /* this one pushes 9 elems, the next one would be %rIP */
-pushq_cfi_reg rdi
-pushq_cfi_reg rsi
-pushq_cfi_reg rdx
-pushq_cfi_reg rcx
-pushq_cfi_reg rax
-pushq_cfi_reg r8
-pushq_cfi_reg r9
-pushq_cfi_reg r10
-pushq_cfi_reg r11
+pushq %rdi
+pushq %rsi
+pushq %rdx
+pushq %rcx
+pushq %rax
+pushq %r8
+pushq %r9
+pushq %r10
+pushq %r11

 .if \put_ret_addr_in_rdi
 /* 9*8(%rsp) is return addr on stack */
-movq_cfi_restore 9*8, rdi
+movq 9*8(%rsp), %rdi
 .endif

 call \func
 jmp restore
-CFI_ENDPROC
 _ASM_NOKPROBE(\name)
 .endm

@ -57,19 +54,16 @@
|
|||
#if defined(CONFIG_TRACE_IRQFLAGS) \
|
||||
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|
||||
|| defined(CONFIG_PREEMPT)
|
||||
CFI_STARTPROC
|
||||
CFI_ADJUST_CFA_OFFSET 9*8
|
||||
restore:
|
||||
popq_cfi_reg r11
|
||||
popq_cfi_reg r10
|
||||
popq_cfi_reg r9
|
||||
popq_cfi_reg r8
|
||||
popq_cfi_reg rax
|
||||
popq_cfi_reg rcx
|
||||
popq_cfi_reg rdx
|
||||
popq_cfi_reg rsi
|
||||
popq_cfi_reg rdi
|
||||
popq %r11
|
||||
popq %r10
|
||||
popq %r9
|
||||
popq %r8
|
||||
popq %rax
|
||||
popq %rcx
|
||||
popq %rdx
|
||||
popq %rsi
|
||||
popq %rdi
|
||||
ret
|
||||
CFI_ENDPROC
|
||||
_ASM_NOKPROBE(restore)
|
||||
#endif
|
|
@@ -0,0 +1,7 @@
+#
+# Makefile for the x86 low level vsyscall code
+#
+obj-y := vsyscall_gtod.o
+
+obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
@@ -24,6 +24,6 @@ TRACE_EVENT(emulate_vsyscall,
 #endif

 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../arch/x86/kernel
+#define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/
 #define TRACE_INCLUDE_FILE vsyscall_trace
 #include <trace/define_trace.h>
@@ -2,7 +2,7 @@
 # Makefile for the ia32 kernel emulation subsystem.
 #

-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
+obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o

 obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
@@ -1,591 +0,0 @@
-/*
- * Compatibility mode system call entry point for x86-64.
- *
- * Copyright 2000-2002 Andi Kleen, SuSE Labs.
- */
-
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
-#include <asm/asm-offsets.h>
-#include <asm/current.h>
-#include <asm/errno.h>
-#include <asm/ia32_unistd.h>
-#include <asm/thread_info.h>
-#include <asm/segment.h>
-#include <asm/irqflags.h>
-#include <asm/asm.h>
-#include <asm/smap.h>
-#include <linux/linkage.h>
-#include <linux/err.h>
-
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_LE		0x40000000
-
-#ifndef CONFIG_AUDITSYSCALL
-#define sysexit_audit		ia32_ret_from_sys_call
-#define sysretl_audit		ia32_ret_from_sys_call
-#endif
-
-	.section .entry.text, "ax"
-
-	/* clobbers %rax */
-	.macro CLEAR_RREGS _r9=rax
-	xorl	%eax,%eax
-	movq	%rax,R11(%rsp)
-	movq	%rax,R10(%rsp)
-	movq	%\_r9,R9(%rsp)
-	movq	%rax,R8(%rsp)
-	.endm
-
-	/*
-	 * Reload arg registers from stack in case ptrace changed them.
-	 * We don't reload %eax because syscall_trace_enter() returned
-	 * the %rax value we should see.  Instead, we just truncate that
-	 * value to 32 bits again as we did on entry from user mode.
-	 * If it's a new value set by user_regset during entry tracing,
-	 * this matches the normal truncation of the user-mode value.
-	 * If it's -1 to make us punt the syscall, then (u32)-1 is still
-	 * an appropriately invalid value.
-	 */
-	.macro LOAD_ARGS32 _r9=0
-	.if \_r9
-	movl R9(%rsp),%r9d
-	.endif
-	movl RCX(%rsp),%ecx
-	movl RDX(%rsp),%edx
-	movl RSI(%rsp),%esi
-	movl RDI(%rsp),%edi
-	movl %eax,%eax			/* zero extension */
-	.endm
-
-	.macro CFI_STARTPROC32 simple
-	CFI_STARTPROC	\simple
-	CFI_UNDEFINED	r8
-	CFI_UNDEFINED	r9
-	CFI_UNDEFINED	r10
-	CFI_UNDEFINED	r11
-	CFI_UNDEFINED	r12
-	CFI_UNDEFINED	r13
-	CFI_UNDEFINED	r14
-	CFI_UNDEFINED	r15
-	.endm
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret32)
-	swapgs
-	sysretl
-ENDPROC(native_usergs_sysret32)
-#endif
-
-/*
- * 32bit SYSENTER instruction entry.
- *
- * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
- * IF and VM in rflags are cleared (IOW: interrupts are off).
- * SYSENTER does not save anything on the stack,
- * and does not save old rip (!!!) and rflags.
- *
- * Arguments:
- * eax  system call number
- * ebx  arg1
- * ecx  arg2
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * ebp  user stack
- * 0(%ebp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
- */
-ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rsp,rbp
-
-	/*
-	 * Interrupts are off on entry.
-	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-	 * it is too small to ever cause noticeable irq latency.
-	 */
-	SWAPGS_UNSAFE_STACK
-	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-	ENABLE_INTERRUPTS(CLBR_NONE)
-
-	/* Zero-extending 32-bit regs, do not remove */
-	movl	%ebp, %ebp
-	movl	%eax, %eax
-
-	movl	ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-	CFI_REGISTER rip,r10
-
-	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%rbp			/* pt_regs->sp */
-	CFI_REL_OFFSET	rsp,0
-	pushfq_cfi				/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%r10 /* pt_regs->ip = thread_info->sysenter_return */
-	CFI_REL_OFFSET	rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
-	cld
-	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
-
-	/*
-	 * no need to do an access_ok check here because rbp has been
-	 * 32bit zero extended
-	 */
-	ASM_STAC
-1:	movl	(%rbp),%ebp
-	_ASM_EXTABLE(1b,ia32_badarg)
-	ASM_CLAC
-
-	/*
-	 * Sysenter doesn't filter flags, so we need to clear NT
-	 * ourselves.  To save a few cycles, we can check whether
-	 * NT was set instead of doing an unconditional popfq.
-	 */
-	testl	$X86_EFLAGS_NT,EFLAGS(%rsp)
-	jnz	sysenter_fix_flags
-sysenter_flags_fixed:
-
-	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
-	jnz	sysenter_tracesys
-sysenter_do_call:
-	/* 32bit syscall -> 64bit C ABI argument conversion */
-	movl	%edi,%r8d	/* arg5 */
-	movl	%ebp,%r9d	/* arg6 */
-	xchg	%ecx,%esi	/* rsi:arg2, rcx:arg4 */
-	movl	%ebx,%edi	/* arg1 */
-	movl	%edx,%edx	/* arg3 (zero extension) */
-sysenter_dispatch:
-	cmpq	$(IA32_NR_syscalls-1),%rax
-	ja	1f
-	call	*ia32_sys_call_table(,%rax,8)
-	movq	%rax,RAX(%rsp)
-1:
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	sysexit_audit
-sysexit_from_sys_call:
-	/*
-	 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
-	 * NMI between STI and SYSEXIT has poorly specified behavior,
-	 * and and NMI followed by an IRQ with usergs is fatal.  So
-	 * we just pretend we're using SYSEXIT but we really use
-	 * SYSRETL instead.
-	 *
-	 * This code path is still called 'sysexit' because it pairs
-	 * with 'sysenter' and it uses the SYSENTER calling convention.
-	 */
-	andl	$~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	movl	RIP(%rsp),%ecx		/* User %eip */
-	CFI_REGISTER rip,rcx
-	RESTORE_RSI_RDI
-	xorl	%edx,%edx		/* avoid info leaks */
-	xorq	%r8,%r8
-	xorq	%r9,%r9
-	xorq	%r10,%r10
-	movl	EFLAGS(%rsp),%r11d	/* User eflags */
-	/*CFI_RESTORE rflags*/
-	TRACE_IRQS_ON
-
-	/*
-	 * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
-	 * since it avoids a dicey window with interrupts enabled.
-	 */
-	movl	RSP(%rsp),%esp
-
-	/*
-	 * USERGS_SYSRET32 does:
-	 *  gsbase = user's gs base
-	 *  eip = ecx
-	 *  rflags = r11
-	 *  cs = __USER32_CS
-	 *  ss = __USER_DS
-	 *
-	 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
-	 *
-	 *  pop %ebp
-	 *  pop %edx
-	 *  pop %ecx
-	 *
-	 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
-	 * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
-	 * address (already known to user code), and R12-R15 are
-	 * callee-saved and therefore don't contain any interesting
-	 * kernel data.
-	 */
-	USERGS_SYSRET32
-
-	CFI_RESTORE_STATE
-
-#ifdef CONFIG_AUDITSYSCALL
-	.macro auditsys_entry_common
-	movl	%esi,%r8d		/* 5th arg: 4th syscall arg */
-	movl	%ecx,%r9d		/*swap with edx*/
-	movl	%edx,%ecx		/* 4th arg: 3rd syscall arg */
-	movl	%r9d,%edx		/* 3rd arg: 2nd syscall arg */
-	movl	%ebx,%esi		/* 2nd arg: 1st syscall arg */
-	movl	%eax,%edi		/* 1st arg: syscall number */
-	call	__audit_syscall_entry
-	movl	ORIG_RAX(%rsp),%eax	/* reload syscall number */
-	movl	%ebx,%edi		/* reload 1st syscall arg */
-	movl	RCX(%rsp),%esi		/* reload 2nd syscall arg */
-	movl	RDX(%rsp),%edx		/* reload 3rd syscall arg */
-	movl	RSI(%rsp),%ecx		/* reload 4th syscall arg */
-	movl	RDI(%rsp),%r8d		/* reload 5th syscall arg */
-	.endm
-
-	.macro auditsys_exit exit
-	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	ia32_ret_from_sys_call
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	movl	%eax,%esi		/* second arg, syscall return value */
-	cmpl	$-MAX_ERRNO,%eax	/* is it an error ? */
-	jbe	1f
-	movslq	%eax, %rsi		/* if error sign extend to 64 bits */
-1:	setbe	%al			/* 1 if error, 0 if not */
-	movzbl	%al,%edi		/* zero-extend that into %edi */
-	call	__audit_syscall_exit
-	movq	RAX(%rsp),%rax		/* reload syscall return value */
-	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	testl	%edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jz	\exit
-	CLEAR_RREGS
-	jmp	int_with_check
-	.endm
-
-sysenter_auditsys:
-	auditsys_entry_common
-	movl	%ebp,%r9d		/* reload 6th syscall arg */
-	jmp	sysenter_dispatch
-
-sysexit_audit:
-	auditsys_exit sysexit_from_sys_call
-#endif
-
-sysenter_fix_flags:
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-	popfq_cfi
-	jmp	sysenter_flags_fixed
-
-sysenter_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jz	sysenter_auditsys
-#endif
-	SAVE_EXTRA_REGS
-	CLEAR_RREGS
-	movq	%rsp,%rdi		/* &pt_regs -> arg1 */
-	call	syscall_trace_enter
-	LOAD_ARGS32	/* reload args from stack in case ptrace changed it */
-	RESTORE_EXTRA_REGS
-	jmp	sysenter_do_call
-	CFI_ENDPROC
-ENDPROC(ia32_sysenter_target)
-
-/*
- * 32bit SYSCALL instruction entry.
- *
- * 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
- * then loads new ss, cs, and rip from previously programmed MSRs.
- * rflags gets masked by a value from another MSR (so CLD and CLAC
- * are not needed). SYSCALL does not save anything on the stack
- * and does not change rsp.
- *
- * Note: rflags saving+masking-with-MSR happens only in Long mode
- * (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it).
- * Don't get confused: rflags saving+masking depends on Long Mode Active bit
- * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
- * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
- *
- * Arguments:
- * eax  system call number
- * ecx  return address
- * ebx  arg1
- * ebp  arg2	(note: not saved in the stack frame, should not be touched)
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * esp  user stack
- * 0(%esp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
- */
-ENTRY(ia32_cstar_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rip,rcx
-	/*CFI_REGISTER	rflags,r11*/
-
-	/*
-	 * Interrupts are off on entry.
-	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-	 * it is too small to ever cause noticeable irq latency.
-	 */
-	SWAPGS_UNSAFE_STACK
-	movl	%esp,%r8d
-	CFI_REGISTER	rsp,r8
-	movq	PER_CPU_VAR(cpu_current_top_of_stack),%rsp
-	ENABLE_INTERRUPTS(CLBR_NONE)
-
-	/* Zero-extending 32-bit regs, do not remove */
-	movl	%eax,%eax
-
-	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%r8			/* pt_regs->sp */
-	CFI_REL_OFFSET	rsp,0
-	pushq_cfi	%r11			/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET	rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rbp			/* pt_regs->cx */
-	movl	%ebp,%ecx
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
-	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
-
-	/*
-	 * no need to do an access_ok check here because r8 has been
-	 * 32bit zero extended
-	 */
-	ASM_STAC
-1:	movl	(%r8),%r9d
-	_ASM_EXTABLE(1b,ia32_badarg)
-	ASM_CLAC
-	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
-	jnz	cstar_tracesys
-cstar_do_call:
-	/* 32bit syscall -> 64bit C ABI argument conversion */
-	movl	%edi,%r8d	/* arg5 */
-	/* r9 already loaded */	/* arg6 */
-	xchg	%ecx,%esi	/* rsi:arg2, rcx:arg4 */
-	movl	%ebx,%edi	/* arg1 */
-	movl	%edx,%edx	/* arg3 (zero extension) */
-cstar_dispatch:
-	cmpq	$(IA32_NR_syscalls-1),%rax
-	ja	1f
-	call	*ia32_sys_call_table(,%rax,8)
-	movq	%rax,RAX(%rsp)
-1:
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	sysretl_audit
-sysretl_from_sys_call:
-	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	RESTORE_RSI_RDI_RDX
-	movl	RIP(%rsp),%ecx
-	CFI_REGISTER rip,rcx
-	movl	EFLAGS(%rsp),%r11d
-	/*CFI_REGISTER rflags,r11*/
-	xorq	%r10,%r10
-	xorq	%r9,%r9
-	xorq	%r8,%r8
-	TRACE_IRQS_ON
-	movl	RSP(%rsp),%esp
-	CFI_RESTORE rsp
-	/*
-	 * 64bit->32bit SYSRET restores eip from ecx,
-	 * eflags from r11 (but RF and VM bits are forced to 0),
-	 * cs and ss are loaded from MSRs.
-	 * (Note: 32bit->32bit SYSRET is different: since r11
-	 * does not exist, it merely sets eflags.IF=1).
-	 *
-	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-	 * descriptor is not reinitialized.  This means that we must
-	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
-	 * exit the kernel, and re-enter using an interrupt vector.  (All
-	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-	 * from happening by reloading SS in __switch_to.
-	 */
-	USERGS_SYSRET32
-
-#ifdef CONFIG_AUDITSYSCALL
-cstar_auditsys:
-	CFI_RESTORE_STATE
-	movl	%r9d,R9(%rsp)	/* register to be clobbered by call */
-	auditsys_entry_common
-	movl	R9(%rsp),%r9d	/* reload 6th syscall arg */
-	jmp	cstar_dispatch
-
-sysretl_audit:
-	auditsys_exit sysretl_from_sys_call
-#endif
-
-cstar_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jz	cstar_auditsys
-#endif
-	xchgl	%r9d,%ebp
-	SAVE_EXTRA_REGS
-	CLEAR_RREGS r9
-	movq	%rsp,%rdi		/* &pt_regs -> arg1 */
-	call	syscall_trace_enter
-	LOAD_ARGS32 1	/* reload args from stack in case ptrace changed it */
-	RESTORE_EXTRA_REGS
-	xchgl	%ebp,%r9d
-	jmp	cstar_do_call
-END(ia32_cstar_target)
-
-ia32_badarg:
-	ASM_CLAC
-	movq	$-EFAULT,%rax
-	jmp	ia32_sysret
-	CFI_ENDPROC
-
-/*
- * Emulated IA32 system calls via int 0x80.
- *
- * Arguments:
- * eax  system call number
- * ebx  arg1
- * ecx  arg2
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * ebp  arg6	(note: not saved in the stack frame, should not be touched)
- *
- * Notes:
- * Uses the same stack frame as the x86-64 version.
- * All registers except eax must be saved (but ptrace may violate that).
- * Arguments are zero extended. For system calls that want sign extension and
- * take long arguments a wrapper is needed. Most calls can just be called
- * directly.
- * Assumes it is only called from user space and entered with interrupts off.
- */
-
-ENTRY(ia32_syscall)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,5*8
-	/*CFI_REL_OFFSET	ss,4*8 */
-	CFI_REL_OFFSET	rsp,3*8
-	/*CFI_REL_OFFSET	rflags,2*8 */
-	/*CFI_REL_OFFSET	cs,1*8 */
-	CFI_REL_OFFSET	rip,0*8
-
-	/*
-	 * Interrupts are off on entry.
-	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-	 * it is too small to ever cause noticeable irq latency.
-	 */
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	SWAPGS
-	ENABLE_INTERRUPTS(CLBR_NONE)
-
-	/* Zero-extending 32-bit regs, do not remove */
-	movl	%eax,%eax
-
-	/* Construct struct pt_regs on stack (iret frame is already on stack) */
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
-	cld
-	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
-
-	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	ia32_tracesys
-ia32_do_call:
-	/* 32bit syscall -> 64bit C ABI argument conversion */
-	movl	%edi,%r8d	/* arg5 */
-	movl	%ebp,%r9d	/* arg6 */
-	xchg	%ecx,%esi	/* rsi:arg2, rcx:arg4 */
-	movl	%ebx,%edi	/* arg1 */
-	movl	%edx,%edx	/* arg3 (zero extension) */
-	cmpq	$(IA32_NR_syscalls-1),%rax
-	ja	1f
-	call	*ia32_sys_call_table(,%rax,8) # xxx: rip relative
-ia32_sysret:
-	movq	%rax,RAX(%rsp)
-1:
-ia32_ret_from_sys_call:
-	CLEAR_RREGS
-	jmp	int_ret_from_sys_call
-
-ia32_tracesys:
-	SAVE_EXTRA_REGS
-	CLEAR_RREGS
-	movq	%rsp,%rdi		/* &pt_regs -> arg1 */
-	call	syscall_trace_enter
-	LOAD_ARGS32	/* reload args from stack in case ptrace changed it */
-	RESTORE_EXTRA_REGS
-	jmp	ia32_do_call
-	CFI_ENDPROC
-END(ia32_syscall)
-
-	.macro PTREGSCALL label, func
-	ALIGN
-GLOBAL(\label)
-	leaq \func(%rip),%rax
-	jmp  ia32_ptregs_common
-	.endm
-
-	CFI_STARTPROC32
-
-	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
-	PTREGSCALL stub32_sigreturn, sys32_sigreturn
-	PTREGSCALL stub32_fork, sys_fork
-	PTREGSCALL stub32_vfork, sys_vfork
-
-	ALIGN
-GLOBAL(stub32_clone)
-	leaq sys_clone(%rip),%rax
-	mov	%r8, %rcx
-	jmp  ia32_ptregs_common
-
-	ALIGN
-ia32_ptregs_common:
-	CFI_ENDPROC
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,SIZEOF_PTREGS
-	CFI_REL_OFFSET	rax,RAX
-	CFI_REL_OFFSET	rcx,RCX
-	CFI_REL_OFFSET	rdx,RDX
-	CFI_REL_OFFSET	rsi,RSI
-	CFI_REL_OFFSET	rdi,RDI
-	CFI_REL_OFFSET	rip,RIP
-/*	CFI_REL_OFFSET	cs,CS*/
-/*	CFI_REL_OFFSET	rflags,EFLAGS*/
-	CFI_REL_OFFSET	rsp,RSP
-/*	CFI_REL_OFFSET	ss,SS*/
-	SAVE_EXTRA_REGS 8
-	call	*%rax
-	RESTORE_EXTRA_REGS 8
-	ret
-	CFI_ENDPROC
-END(ia32_ptregs_common)
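The deleted file above documented the int 0x80 register convention (eax = syscall number, ebx..ebp = args 1..6). A user-space C sketch exercising that convention from the caller's side -- syscall number 4 is the i386 __NR_write; the demo is a no-op on anything other than a 32-bit x86 build:

#include <stdio.h>

int main(void)
{
#ifdef __i386__
	const char msg[] = "hello via int $0x80\n";
	long ret;

	asm volatile ("int $0x80"
		      : "=a" (ret)			/* eax: return value */
		      : "a" (4),			/* eax: __NR_write   */
			"b" (1),			/* ebx: arg1 (fd)    */
			"c" (msg),			/* ecx: arg2 (buf)   */
			"d" (sizeof(msg) - 1)		/* edx: arg3 (count) */
		      : "memory");
	return ret < 0;
#else
	puts("build with -m32 on x86 to exercise int $0x80");
	return 0;
#endif
}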
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older version.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-#define CFI_ESCAPE		.cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-	/*
-	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-	 * The latter we currently just discard since we don't do DWARF
-	 * unwinding at runtime.  So only the offline DWARF information is
-	 * useful to anyone.  Note we should not use this directive if this
-	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-	 * changed so it doesn't discard .eh_frame.
-	 */
-	.cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-exisiting code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC		cfi_ignore
-#define CFI_ENDPROC		cfi_ignore
-#define CFI_DEF_CFA		cfi_ignore
-#define CFI_DEF_CFA_REGISTER	cfi_ignore
-#define CFI_DEF_CFA_OFFSET	cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET	cfi_ignore
-#define CFI_OFFSET		cfi_ignore
-#define CFI_REL_OFFSET		cfi_ignore
-#define CFI_REGISTER		cfi_ignore
-#define CFI_RESTORE		cfi_ignore
-#define CFI_REMEMBER_STATE	cfi_ignore
-#define CFI_RESTORE_STATE	cfi_ignore
-#define CFI_UNDEFINED		cfi_ignore
-#define CFI_ESCAPE		cfi_ignore
-#define CFI_SIGNAL_FRAME	cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-	.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#else /*!CONFIG_X86_64*/
-	.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
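The central trick in the header deleted above is defining the CFI macros away when the assembler cannot accept the directives. A C analogue of the same conditional-no-op pattern, with HAVE_CFI as a hypothetical stand-in for CONFIG_AS_CFI:

#include <stdio.h>

/*
 * If the toolchain understands the annotation, emit it; otherwise
 * define the macro away so the very same source still builds.
 */
#ifdef HAVE_CFI
# define CFI_NOTE(msg) printf("cfi: %s\n", msg)
#else
# define CFI_NOTE(msg) ((void)0)	/* expands to nothing */
#endif

int main(void)
{
	CFI_NOTE("startproc");		/* no-op unless built with -DHAVE_CFI */
	puts("body runs either way");
	return 0;
}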
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__

 #include <asm/asm.h>
-#include <asm/dwarf2.h>

 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
	.macro FRAME
-	__ASM_SIZE(push,_cfi)	%__ASM_REG(bp)
-	CFI_REL_OFFSET		__ASM_REG(bp), 0
+	__ASM_SIZE(push,)	%__ASM_REG(bp)
	__ASM_SIZE(mov)	%__ASM_REG(sp), %__ASM_REG(bp)
	.endm
	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi)	%__ASM_REG(bp)
-	CFI_RESTORE		__ASM_REG(bp)
+	__ASM_SIZE(pop,)	%__ASM_REG(bp)
	.endm
 #else
	.macro FRAME
@@ -206,8 +206,13 @@ do {							\

 #endif	/* !CONFIG_PARAVIRT */

-#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),		\
-					     (u32)((val) >> 32))
+/*
+ * 64-bit version of wrmsr_safe():
+ */
+static inline int wrmsrl_safe(u32 msr, u64 val)
+{
+	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
+}

 #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
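The macro-to-inline conversion above also changes evaluation semantics: the macro expanded val twice. A user-space sketch of the difference, with wrmsr_safe() stubbed out to a printf (no real MSR is touched; the MSR number is arbitrary):

#include <stdint.h>
#include <stdio.h>

static int wrmsr_safe(uint32_t msr, uint32_t low, uint32_t high)
{
	printf("msr %#x <- high=%#x low=%#x\n", msr, high, low);
	return 0;
}

/* Old macro form: 'val' appears twice, so an argument with side
 * effects would be evaluated twice. */
#define wrmsrl_safe_macro(msr, val) \
	wrmsr_safe((msr), (uint32_t)(val), (uint32_t)((val) >> 32))

/* New inline form: 'val' is evaluated exactly once and type-checked
 * as a real u64 parameter. */
static inline int wrmsrl_safe(uint32_t msr, uint64_t val)
{
	return wrmsr_safe(msr, (uint32_t)val, (uint32_t)(val >> 32));
}

static uint64_t counter;
static uint64_t next_val(void) { return ++counter; }

int main(void)
{
	wrmsrl_safe_macro(0x10, next_val());	/* next_val() runs twice */
	wrmsrl_safe(0x10, next_val());		/* next_val() runs once  */
	printf("counter = %llu\n", (unsigned long long)counter);	/* 3 */
	return 0;
}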
@@ -5,12 +5,14 @@

 /* misc architecture specific prototypes */

-void system_call(void);
 void syscall_init(void);

-void ia32_syscall(void);
-void ia32_cstar_target(void);
-void ia32_sysenter_target(void);
+void entry_SYSCALL_64(void);
+void entry_SYSCALL_compat(void);
+void entry_INT80_32(void);
+void entry_INT80_compat(void);
+void entry_SYSENTER_32(void);
+void entry_SYSENTER_compat(void);

 void x86_configure_nx(void);
 void x86_report_nx(void);
@@ -231,11 +231,21 @@
 #define TLS_SIZE	(GDT_ENTRY_TLS_ENTRIES* 8)

 #ifdef __KERNEL__

+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__

-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 #ifdef CONFIG_TRACING
-# define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handler_array early_idt_handler_array
 #endif

 /*
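A tiny sketch of the arithmetic implied by the comment above -- with a fixed nine-byte stride, each vector's stub is just a constant offset into the array, which is what lets C declare it as a two-dimensional char array:

#include <stdio.h>

#define EARLY_IDT_HANDLER_SIZE	9	/* push(2) + push(2) + jmp(5) */
#define NUM_EXCEPTION_VECTORS	32

int main(void)
{
	for (int i = 0; i < NUM_EXCEPTION_VECTORS; i += 8)
		printf("vector %2d -> byte offset %3d into early_idt_handler_array\n",
		       i, i * EARLY_IDT_HANDLER_SIZE);
	return 0;
}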
@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n

 CFLAGS_irq.o := -I$(src)/../include/asm/trace

-obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
+obj-y			:= process_$(BITS).o signal.o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
@@ -31,9 +31,6 @@ obj-y			+= probe_roms.o
 obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= mcount_64.o
-obj-y			+= syscall_$(BITS).o vsyscall_gtod.o
-obj-$(CONFIG_IA32_EMULATION)	+= syscall_32.o
-obj-$(CONFIG_X86_VSYSCALL_EMULATION)	+= vsyscall_64.o vsyscall_emu_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
@@ -66,7 +66,7 @@ int main(void)
	DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
	DEFINE(NR_syscalls, sizeof(syscalls_64));

-	DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
+	DEFINE(__NR_entry_INT80_compat_max, sizeof(syscalls_ia32) - 1);
	DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));

	return 0;
@@ -1026,7 +1026,7 @@ void enable_sep_cpu(void)
		(unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
		0);

-	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

 out:
	put_cpu();
@@ -1204,10 +1204,10 @@ void syscall_init(void)
	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
-	wrmsrl(MSR_LSTAR, system_call);
+	wrmsrl(MSR_LSTAR, entry_SYSCALL_64);

 #ifdef CONFIG_IA32_EMULATION
-	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+	wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1216,7 +1216,7 @@ void syscall_init(void)
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
	wrmsrl(MSR_CSTAR, ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
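For orientation, the MSR_STAR value written in syscall_init() above packs two segment selectors into the high dword. A sketch with illustrative selector values (the real ones come from segment.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative selector values, not taken from any real segment.h. */
#define __KERNEL_CS	0x10
#define __USER32_CS	0x23

int main(void)
{
	/*
	 * MSR_STAR layout: bits 63..48 hold the base selector SYSRET
	 * uses, bits 47..32 the one SYSCALL loads; the low half is the
	 * legacy EIP field, unused in long mode.
	 */
	uint64_t star = ((uint64_t)__USER32_CS << 48) |
			((uint64_t)__KERNEL_CS << 32);

	printf("MSR_STAR = %#018llx\n", (unsigned long long)star);
	return 0;
}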
(The diff for one file is not shown here because of its large size.)
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
	clear_bss();

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-		set_intr_gate(i, early_idt_handlers[i]);
+		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));
@@ -478,21 +478,22 @@ is486:
 __INIT
 setup_once:
	/*
-	 * Set up a idt with 256 entries pointing to ignore_int,
-	 * interrupt gates. It doesn't actually load idt - that needs
-	 * to be done on each CPU. Interrupts are enabled elsewhere,
-	 * when we can be relatively sure everything is ok.
+	 * Set up a idt with 256 interrupt gates that push zero if there
+	 * is no error code and then jump to early_idt_handler_common.
+	 * It doesn't actually load the idt - that needs to be done on
+	 * each CPU. Interrupts are enabled elsewhere, when we can be
+	 * relatively sure everything is ok.
	 */

	movl $idt_table,%edi
-	movl $early_idt_handlers,%eax
+	movl $early_idt_handler_array,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx
 1:
	movl %eax,(%edi)
	movl %eax,4(%edi)
	/* interrupt gate, dpl=0, present */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-	addl $9,%eax
+	addl $EARLY_IDT_HANDLER_SIZE,%eax
	addl $8,%edi
	loop 1b

@@ -524,26 +525,28 @@ setup_once:
	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret

-ENTRY(early_idt_handlers)
+ENTRY(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
-ENDPROC(early_idt_handlers)
+ENDPROC(early_idt_handler_array)

 /* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
	cld

	cmpl $2,(%esp)		# X86_TRAP_NMI

@@ -603,7 +606,7 @@ ex_entry:
 .Lis_nmi:
	addl $8,%esp		/* drop vector number and error code */
	iret
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)

 /* This is the default interrupt "handler" :-) */
	ALIGN
@@ -321,26 +321,28 @@ bad_address:
	jmp bad_address

	__INIT
-	.globl early_idt_handlers
-early_idt_handlers:
+ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	# 96(%rsp) %cs
	# 88(%rsp) %rip
	# 80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
+ENDPROC(early_idt_handler_array)

 /* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI

@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
 .Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)

	__INITDATA
@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
-
-asmlinkage int system_call(void);
+#include <asm/proto.h>
 #endif

 /* Must be page-aligned because the real IDT is used in a fixmap. */
@@ -980,12 +979,12 @@ void __init trap_init(void)
		set_bit(i, used_vectors);

 #ifdef CONFIG_IA32_EMULATION
-	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif

 #ifdef CONFIG_X86_32
-	set_system_trap_gate(IA32_SYSCALL_VECTOR, &system_call);
+	set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif
@@ -17,7 +17,6 @@ clean-files := inat-tables.c
 obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o

 lib-y := delay.o misc.o cmdline.o
-lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
@@ -11,26 +11,23 @@

 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>

 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-	pushfl_cfi
+	pushfl
	cli
 .endm

 .macro UNLOCK reg
-	popfl_cfi
+	popfl
 .endm

 #define BEGIN(op) \
 .macro endp; \
-	CFI_ENDPROC; \
 ENDPROC(atomic64_##op##_386); \
 .purgem endp; \
 .endm; \
 ENTRY(atomic64_##op##_386); \
-	CFI_STARTPROC; \
	LOCK v;

 #define ENDP endp
@@ -11,7 +11,6 @@

 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>

 .macro read64 reg
	movl %ebx, %eax
@@ -22,16 +21,11 @@
 .endm

 ENTRY(atomic64_read_cx8)
-	CFI_STARTPROC
-
	read64 %ecx
	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)

 ENTRY(atomic64_set_cx8)
-	CFI_STARTPROC
-
 1:
 /* we don't need LOCK_PREFIX since aligned 64-bit writes
  * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
	jne 1b

	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)

 ENTRY(atomic64_xchg_cx8)
-	CFI_STARTPROC
-
 1:
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)

 .macro addsub_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
+	pushl %ebp
+	pushl %ebx
+	pushl %esi
+	pushl %edi

	movl %eax, %esi
	movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
	movl %ebx, %eax
	movl %ecx, %edx
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm

@@ -93,8 +81,7 @@ addsub_return sub sub sbb

 .macro incdec_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx

	read64 %esi
 1:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
	movl %ebx, %eax
	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm

@@ -119,8 +105,7 @@ incdec_return inc add adc
 incdec_return dec sub sbb

 ENTRY(atomic64_dec_if_positive_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx

	read64 %esi
 1:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
 2:
	movl %ebx, %eax
	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)

 ENTRY(atomic64_add_unless_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
+	pushl %ebp
+	pushl %ebx
 /* these just push these two parameters on the stack */
-	pushl_cfi_reg edi
-	pushl_cfi_reg ecx
+	pushl %edi
+	pushl %ecx

	movl %eax, %ebp
	movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
	movl $1, %eax
 3:
	addl $8, %esp
-	CFI_ADJUST_CFA_OFFSET -8
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %ebx
+	popl %ebp
	ret
 4:
	cmpl %edx, 4(%esp)
	jne 2b
	xorl %eax, %eax
	jmp 3b
-	CFI_ENDPROC
 ENDPROC(atomic64_add_unless_cx8)

 ENTRY(atomic64_inc_not_zero_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx

	read64 %esi
 1:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)

	movl $1, %eax
 3:
-	popl_cfi_reg ebx
+	popl %ebx
	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
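All of the cx8 routines above share one shape: read the current 64-bit value, compute a new one, and retry cmpxchg8b until no concurrent writer intervened. A user-space model of that loop, using GCC's __atomic builtins in place of LOCK cmpxchg8b:

#include <stdint.h>
#include <stdio.h>

/*
 * User-space model of atomic64_add_return_cx8: load the current
 * value, compute old+delta, and compare-and-swap until it sticks.
 */
static long long atomic64_add_return(long long delta, long long *v)
{
	long long old = __atomic_load_n(v, __ATOMIC_RELAXED);
	long long next;

	do {
		next = old + delta;	/* recomputed if another writer won */
	} while (!__atomic_compare_exchange_n(v, &old, next, 1,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return next;
}

int main(void)
{
	long long v = 40;

	printf("%lld\n", atomic64_add_return(2, &v));	/* 42 */
	return 0;
}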
@@ -26,7 +26,6 @@
 */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>

@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
	 * alignment for the unrolled loop.
	 */
 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
	jz 8f
	roll $8, %eax
 8:
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)

 #else

@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
 /* Version for PentiumII/PPro */

 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
	movl 20(%esp),%eax	# Function arg: unsigned int sum
	movl 16(%esp),%ecx	# Function arg: int len
	movl 12(%esp),%esi	# Function arg:	const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
	jz 90f
	roll $8, %eax
 90:
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)

 #endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define FP		12

 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
	subl  $4,%esp
-	CFI_ADJUST_CFA_OFFSET 4
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %edi
+	pushl %esi
+	pushl %ebx
	movl ARGBASE+16(%esp),%eax	# sum
	movl ARGBASE+12(%esp),%ecx	# len
	movl ARGBASE+4(%esp),%esi	# src
@@ -401,12 +394,11 @@ DST(	movb %cl, (%edi)	)

 .previous

-	popl_cfi_reg ebx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi %ecx			# equivalent to addl $4,%esp
+	popl %ebx
+	popl %esi
+	popl %edi
+	popl %ecx			# equivalent to addl $4,%esp
	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)

 #else

@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
 #define ARGBASE 12

 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
+	pushl %ebx
+	pushl %edi
+	pushl %esi
	movl ARGBASE+4(%esp),%esi	#src
	movl ARGBASE+8(%esp),%edi	#dst
	movl ARGBASE+12(%esp),%ecx	#len
@@ -489,11 +480,10 @@ DST(	movb %dl, (%edi)	)
	jmp 7b
 .previous

-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebx
+	popl %esi
+	popl %edi
+	popl %ebx
	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)

 #undef ROUND
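For orientation, what csum_partial computes is the RFC 1071 one's-complement sum. A simplified model -- the kernel version unrolls heavily and handles odd lengths, alignment, and byte order, all of which this sketch glosses over:

#include <stdint.h>
#include <stdio.h>

/* Accumulate one 16-bit word with end-around carry. */
static uint32_t csum_add16(uint32_t sum, uint16_t word)
{
	sum += word;
	return (sum & 0xffff) + (sum >> 16);
}

/* Fold the 32-bit accumulator to 16 bits and complement it. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Arbitrary sample words standing in for a packet header. */
	const uint16_t words[] = { 0x4500, 0x0073, 0x0000, 0x4000,
				   0x4011, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
	uint32_t sum = 0;

	for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = csum_add16(sum, words[i]);
	printf("checksum = %#06x\n", csum_fold(sum));
	return 0;
}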
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>

@@ -15,7 +14,6 @@
 * %rdi	- page
 */
 ENTRY(clear_page)
-	CFI_STARTPROC

	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp clear_page_c_e", X86_FEATURE_ERMS

@@ -24,11 +22,9 @@ ENTRY(clear_page)
	xorl %eax,%eax
	rep stosq
	ret
-	CFI_ENDPROC
 ENDPROC(clear_page)

 ENTRY(clear_page_orig)
-	CFI_STARTPROC

	xorl   %eax,%eax
	movl   $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
	jnz	.Lloop
	nop
	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_orig)

 ENTRY(clear_page_c_e)
-	CFI_STARTPROC
	movl $4096,%ecx
	xorl %eax,%eax
	rep stosb
	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_c_e)
@@ -6,7 +6,6 @@
 *
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/percpu.h>

 .text
@@ -21,7 +20,6 @@
 * %al  : Operation successful
 */
 ENTRY(this_cpu_cmpxchg16b_emu)
-CFI_STARTPROC

 #
 # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
 # *atomic* on a single cpu (as provided by the this_cpu_xx class of
 # macros).
 #
-	pushfq_cfi
+	pushfq
	cli

	cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
	movq %rbx, PER_CPU_VAR((%rsi))
	movq %rcx, PER_CPU_VAR(8(%rsi))

-	CFI_REMEMBER_STATE
-	popfq_cfi
+	popfq
	mov $1, %al
	ret

-	CFI_RESTORE_STATE
 .Lnot_same:
-	popfq_cfi
+	popfq
	xor %al,%al
	ret

-CFI_ENDPROC

 ENDPROC(this_cpu_cmpxchg16b_emu)
@@ -7,7 +7,6 @@
 */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>

 .text

@@ -20,14 +19,13 @@
 * %ecx : high 32 bits of new value
 */
 ENTRY(cmpxchg8b_emu)
-CFI_STARTPROC

 #
 # Emulate 'cmpxchg8b (%esi)' on UP except we don't
 # set the whole ZF thing (caller will just compare
 # eax:edx with the expected value)
 #
-	pushfl_cfi
+	pushfl
	cli

	cmpl  (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
	movl %ebx,  (%esi)
	movl %ecx, 4(%esi)

-	CFI_REMEMBER_STATE
-	popfl_cfi
+	popfl
	ret

-	CFI_RESTORE_STATE
 .Lnot_same:
	movl  (%esi), %eax
 .Lhalf_same:
	movl 4(%esi), %edx

-	popfl_cfi
+	popfl
	ret

-CFI_ENDPROC
 ENDPROC(cmpxchg8b_emu)
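The emulation above is valid only because a uniprocessor can fake atomicity by disabling interrupts around the compare and the conditional store. A user-space model of the same logic, with irq_save()/irq_restore() as pretend stand-ins for pushfl/cli and popfl:

#include <stdint.h>
#include <stdio.h>

/* Pretend interrupt control; no-ops in user space. */
static unsigned long irq_save(void)      { return 0; }
static void irq_restore(unsigned long f) { (void)f;  }

/*
 * With interrupts off on a single CPU, nothing can interleave between
 * the compare and the store, so no LOCK prefix (and no real cmpxchg8b
 * instruction) is required.
 */
static int cmpxchg8b_emu(uint64_t *ptr, uint64_t *old, uint64_t newval)
{
	unsigned long flags = irq_save();
	int same = (*ptr == *old);

	if (same)
		*ptr = newval;	/* matched: install the new value     */
	else
		*old = *ptr;	/* mismatch: report the current value */
	irq_restore(flags);
	return same;
}

int main(void)
{
	uint64_t v = 5, expect = 5;

	printf("swapped=%d v=%llu\n", cmpxchg8b_emu(&v, &expect, 9),
	       (unsigned long long)v);
	return 0;
}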
@@ -1,7 +1,6 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>

@@ -13,22 +12,16 @@
 */
	ALIGN
 ENTRY(copy_page)
-	CFI_STARTPROC
	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
	movl	$4096/8, %ecx
	rep	movsq
	ret
-	CFI_ENDPROC
 ENDPROC(copy_page)

 ENTRY(copy_page_regs)
-	CFI_STARTPROC
	subq	$2*8,	%rsp
-	CFI_ADJUST_CFA_OFFSET 2*8
	movq	%rbx,	(%rsp)
-	CFI_REL_OFFSET rbx, 0
	movq	%r12,	1*8(%rsp)
-	CFI_REL_OFFSET r12, 1*8

	movl	$(4096/64)-5,	%ecx
	.p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
	jnz	.Loop2

	movq	(%rsp), %rbx
-	CFI_RESTORE rbx
	movq	1*8(%rsp), %r12
-	CFI_RESTORE r12
	addq	$2*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -2*8
	ret
-	CFI_ENDPROC
 ENDPROC(copy_page_regs)
@@ -7,7 +7,6 @@
 */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>

@@ -18,7 +17,6 @@

 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
-	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
		  X86_FEATURE_REP_GOOD,			\
		  "jmp copy_user_enhanced_fast_string",	\
		  X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_to_user)

 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
-	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
		  X86_FEATURE_REP_GOOD,			\
		  "jmp copy_user_enhanced_fast_string",	\
		  X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
-ENTRY(bad_from_user)
 bad_from_user:
-	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
@@ -62,7 +56,6 @@ bad_from_user:
 bad_to_user:
	movl %edx,%eax
	ret
-	CFI_ENDPROC
 ENDPROC(bad_from_user)
	.previous

@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
 * eax uncopied bytes or 0 if successful.
 */
 ENTRY(copy_user_generic_unrolled)
-	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_unrolled)

 /* Some CPUs run faster using the string copy instructions.
@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
 * eax uncopied bytes or 0 if successful.
 */
 ENTRY(copy_user_generic_string)
-	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_string)

 /*
@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
 * eax uncopied bytes or 0 if successful.
 */
 ENTRY(copy_user_enhanced_fast_string)
-	CFI_STARTPROC
	ASM_STAC
	movl %edx,%ecx
 1:	rep
@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
 .previous

	_ASM_EXTABLE(1b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_enhanced_fast_string)

 /*
@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
 * This will force destination/source out of cache for more performance.
 */
 ENTRY(__copy_user_nocache)
-	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(__copy_user_nocache)
@@ -6,7 +6,6 @@
 * for more details. No warranty for anything given at all.
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>

@@ -47,23 +46,16 @@

 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
	cmpl	$3*64, %edx
	jle	.Lignore

 .Lignore:
	subq  $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET 7*8
	movq  %rbx, 2*8(%rsp)
-	CFI_REL_OFFSET rbx, 2*8
	movq  %r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
	movq  %r14, 4*8(%rsp)
-	CFI_REL_OFFSET r14, 4*8
	movq  %r13, 5*8(%rsp)
-	CFI_REL_OFFSET r13, 5*8
	movq  %rbp, 6*8(%rsp)
-	CFI_REL_OFFSET rbp, 6*8

	movq  %r8, (%rsp)
	movq  %r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
	addl %ebx, %eax
	adcl %r9d, %eax		/* carry */

-	CFI_REMEMBER_STATE
 .Lende:
	movq 2*8(%rsp), %rbx
-	CFI_RESTORE rbx
	movq 3*8(%rsp), %r12
-	CFI_RESTORE r12
	movq 4*8(%rsp), %r14
-	CFI_RESTORE r14
	movq 5*8(%rsp), %r13
-	CFI_RESTORE r13
	movq 6*8(%rsp), %rbp
-	CFI_RESTORE rbp
	addq $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -7*8
	ret
-	CFI_RESTORE_STATE

	/* Exception handlers. Very simple, zeroing is done in the wrappers */
 .Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
	jz   .Lende
	movl $-EFAULT, (%rax)
	jmp .Lende
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
@@ -26,7 +26,6 @@
 */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@

	.text
 ENTRY(__get_user_1)
-	CFI_STARTPROC
	GET_THREAD_INFO(%_ASM_DX)
	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
	xor %eax,%eax
	ASM_CLAC
	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_1)

 ENTRY(__get_user_2)
-	CFI_STARTPROC
	add $1,%_ASM_AX
	jc bad_get_user
	GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
	xor %eax,%eax
	ASM_CLAC
	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_2)

 ENTRY(__get_user_4)
-	CFI_STARTPROC
	add $3,%_ASM_AX
	jc bad_get_user
	GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
	xor %eax,%eax
	ASM_CLAC
	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_4)

 ENTRY(__get_user_8)
-	CFI_STARTPROC
 #ifdef CONFIG_X86_64
	add $7,%_ASM_AX
	jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
	ASM_CLAC
	ret
 #endif
-	CFI_ENDPROC
 ENDPROC(__get_user_8)


 bad_get_user:
-	CFI_STARTPROC
	xor %edx,%edx
	mov $(-EFAULT),%_ASM_AX
	ASM_CLAC
	ret
-	CFI_ENDPROC
 END(bad_get_user)

 #ifdef CONFIG_X86_32
 bad_get_user_8:
-	CFI_STARTPROC
	xor %edx,%edx
	xor %ecx,%ecx
	mov $(-EFAULT),%_ASM_AX
	ASM_CLAC
	ret
-	CFI_ENDPROC
 END(bad_get_user_8)
 #endif
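Each __get_user_N above performs the same two checks before touching user memory: the access must not wrap the address space ('jc bad_get_user' after the 'add $size-1') and its last byte must lie below the task's limit ('jae bad_get_user' after the compare). A sketch of that predicate, with a made-up limit value:

#include <stdint.h>
#include <stdio.h>

static int access_ok(uintptr_t addr, unsigned size, uintptr_t addr_limit)
{
	uintptr_t last = addr + size - 1;

	if (last < addr)		/* wrapped: the 'jc' case  */
		return 0;
	if (last >= addr_limit)		/* the 'jae' case          */
		return 0;
	return 1;
}

int main(void)
{
	uintptr_t limit = 0xc0000000;	/* hypothetical user/kernel split */

	printf("%d\n", access_ok(0xbfffffff, 1, limit));	/* 1: last byte ok  */
	printf("%d\n", access_ok(0xbfffffff, 4, limit));	/* 0: crosses limit */
	printf("%d\n", access_ok(UINTPTR_MAX, 8, limit));	/* 0: wraps         */
	return 0;
}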
@@ -16,15 +16,12 @@
 */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>

 /*
 * override generic version in lib/iomap_copy.c
 */
 ENTRY(__iowrite32_copy)
-	CFI_STARTPROC
	movl %edx,%ecx
	rep movsd
	ret
-	CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
@@ -2,7 +2,6 @@

 #include <linux/linkage.h>
 #include <asm/cpufeature.h>
-#include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>

 /*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
 ENDPROC(memcpy_erms)

 ENTRY(memcpy_orig)
-	CFI_STARTPROC
	movq %rdi, %rax

	cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)

 .Lend:
	retq
-	CFI_ENDPROC
 ENDPROC(memcpy_orig)
@@ -6,7 +6,6 @@
 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>

@@ -27,7 +26,6 @@

 ENTRY(memmove)
 ENTRY(__memmove)
-	CFI_STARTPROC

	/* Handle more 32 bytes in loop */
	mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
	movb %r11b, (%rdi)
 13:
	retq
-	CFI_ENDPROC
 ENDPROC(__memmove)
 ENDPROC(memmove)
arch/x86/lib/memset_64.S:

@@ -1,7 +1,6 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */

 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>

@@ -66,7 +65,6 @@ ENTRY(memset_erms)
 ENDPROC(memset_erms)

 ENTRY(memset_orig)
-	CFI_STARTPROC
 	movq %rdi,%r10

 	/* expand byte value */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
 	movl %edi,%r9d
 	andl $7,%r9d
 	jnz .Lbad_alignment
-	CFI_REMEMBER_STATE
 .Lafter_bad_alignment:

 	movq %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
 	movq %r10,%rax
 	ret

-	CFI_RESTORE_STATE
 .Lbad_alignment:
 	cmpq $7,%rdx
 	jbe .Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-	CFI_ENDPROC
 ENDPROC(memset_orig)
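One step worth spelling out: the "expand byte value" comment above memset_orig refers to the multiply trick that smears the fill byte across a 64-bit word so the main loop can store eight bytes per iteration. In C, as a standalone sketch:

#include <stdint.h>

/* 0x01 in every byte lane times c: c = 0xab -> 0xabababababababab */
static inline uint64_t expand_byte(uint8_t c)
{
	return (uint64_t)c * 0x0101010101010101ULL;
}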
arch/x86/lib/msr-reg.S:

@@ -1,6 +1,5 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>

@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushq_cfi_reg rbx
-	pushq_cfi_reg rbp
+	pushq %rbx
+	pushq %rbp
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
 	movl    20(%rdi), %ebp
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
-	CFI_REMEMBER_STATE
 1:	\op
 2:	movl    %eax, (%r10)
 	movl	%r11d, %eax	/* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
 	movl    %ebp, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq_cfi_reg rbp
-	popq_cfi_reg rbx
+	popq %rbp
+	popq %rbx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, %r11d
 	jmp     2b

 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm

@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)

 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg ebp
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
-	pushl_cfi $0              /* Return value */
-	pushl_cfi %eax
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
+	pushl $0              /* Return value */
+	pushl %eax
 	movl    4(%eax), %ecx
 	movl    8(%eax), %edx
 	movl    12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
 	movl    24(%eax), %esi
 	movl    28(%eax), %edi
 	movl    (%eax), %eax
-	CFI_REMEMBER_STATE
 1:	\op
-2:	pushl_cfi %eax
+2:	pushl %eax
 	movl    4(%esp), %eax
-	popl_cfi (%eax)
+	popl (%eax)
 	addl    $4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
 	movl    %ecx, 4(%eax)
 	movl    %edx, 8(%eax)
 	movl    %ebx, 12(%eax)
 	movl    %ebp, 20(%eax)
 	movl    %esi, 24(%eax)
 	movl    %edi, 28(%eax)
-	popl_cfi %eax
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebp
-	popl_cfi_reg ebx
+	popl %eax
+	popl %edi
+	popl %esi
+	popl %ebp
+	popl %ebx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, 4(%esp)
 	jmp     2b

 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
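The op_safe_regs macro instantiates rdmsr_safe_regs() and wrmsr_safe_regs(), which exchange all eight 32-bit GPR values through a u32[8] array (eax, ecx, edx, ebx, a skipped esp slot, ebp, esi, edi, matching the 4(%eax)/8(%eax)/... loads above) and return 0, or -EIO via the exception-table path at 3:. A hedged usage sketch; the helper and the way it is used are illustrative, not kernel code:

#include <linux/errno.h>
#include <asm/msr.h>

static int probe_msr(u32 msr)
{
	u32 regs[8] = { 0 };
	int err;

	regs[1] = msr;			/* slot 1 is %ecx: the MSR index */
	err = rdmsr_safe_regs(regs);	/* 0, or -EIO if the rdmsr faults */
	if (err)
		return err;
	/* regs[0]/regs[2] now hold the %eax/%edx halves of the MSR */
	return 0;
}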
arch/x86/lib/putuser.S:

@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
@@ -30,11 +29,9 @@
  * as they get called from within inline assembly.
  */

-#define ENTER	CFI_STARTPROC ;	\
-		GET_THREAD_INFO(%_ASM_BX)
+#define ENTER	GET_THREAD_INFO(%_ASM_BX)
 #define EXIT	ASM_CLAC ;	\
-		ret ;		\
-		CFI_ENDPROC
+		ret

 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)

 bad_put_user:
-	CFI_STARTPROC
 	movl	$-EFAULT,%eax
 	EXIT
 END(bad_put_user)
arch/x86/lib/rwsem.S:

@@ -15,7 +15,6 @@

 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>

 #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
  */

 #define save_common_regs \
-	pushl_cfi_reg ecx
+	pushl %ecx

 #define restore_common_regs \
-	popl_cfi_reg ecx
+	popl %ecx

 /* Avoid uglifying the argument copying x86-64 needs to do. */
 .macro movq src, dst
@@ -64,50 +63,45 @@
  */

 #define save_common_regs \
-	pushq_cfi_reg rdi; \
-	pushq_cfi_reg rsi; \
-	pushq_cfi_reg rcx; \
-	pushq_cfi_reg r8;  \
-	pushq_cfi_reg r9;  \
-	pushq_cfi_reg r10; \
-	pushq_cfi_reg r11
+	pushq %rdi; \
+	pushq %rsi; \
+	pushq %rcx; \
+	pushq %r8;  \
+	pushq %r9;  \
+	pushq %r10; \
+	pushq %r11

 #define restore_common_regs \
-	popq_cfi_reg r11; \
-	popq_cfi_reg r10; \
-	popq_cfi_reg r9;  \
-	popq_cfi_reg r8;  \
-	popq_cfi_reg rcx; \
-	popq_cfi_reg rsi; \
-	popq_cfi_reg rdi
+	popq %r11; \
+	popq %r10; \
+	popq %r9;  \
+	popq %r8;  \
+	popq %rcx; \
+	popq %rsi; \
+	popq %rdi

 #endif

 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_down_read_failed
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)

 ENTRY(call_rwsem_down_write_failed)
-	CFI_STARTPROC
 	save_common_regs
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)

 ENTRY(call_rwsem_wake)
-	CFI_STARTPROC
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
 	jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
 	call rwsem_wake
 	restore_common_regs
 1:	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)

 ENTRY(call_rwsem_downgrade_wake)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
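The "Fix up special calling conventions" comment is the point of this file: the rwsem fast paths are open-coded inline asm that keep every argument register live, so they cannot call the C slow-path functions directly and go through these thunks instead. A simplified sketch of that hand-off, assuming the 64-bit layout -- not the kernel's exact asm/rwsem.h code:

struct rw_semaphore { long count; };

static inline void down_read_sketch(struct rw_semaphore *sem)
{
	asm volatile("lock; incq (%[sem])\n\t"	/* fast path: ++count */
		     "jns 1f\n\t"		/* non-negative: uncontended */
		     "call call_rwsem_down_read_failed\n"
		     "1:"
		     : "+m" (sem->count)
		     : [sem] "a" (sem)	/* pointer in %rax, as the thunk expects */
		     : "memory", "cc");
}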
arch/x86/net/bpf_jit.S:

@@ -8,7 +8,6 @@
  * of the License.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>

 /*
  * Calling convention :
arch/x86/um/Makefile:

@@ -26,7 +26,7 @@ else

 obj-y += syscalls_64.o vdso/

-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../lib/thunk_64.o \
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
 	../lib/rwsem.o

 endif
arch/x86/xen/xen-asm_64.S:

@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1)
 /* Normal 64-bit system call target */
 ENTRY(xen_syscall_target)
 	undo_xen_syscall
-	jmp system_call_after_swapgs
+	jmp entry_SYSCALL_64_after_swapgs
 ENDPROC(xen_syscall_target)

 #ifdef CONFIG_IA32_EMULATION
@@ -122,13 +122,13 @@ ENDPROC(xen_syscall_target)
 /* 32-bit compat syscall target */
 ENTRY(xen_syscall32_target)
 	undo_xen_syscall
-	jmp ia32_cstar_target
+	jmp entry_SYSCALL_compat
 ENDPROC(xen_syscall32_target)

 /* 32-bit compat sysenter target */
 ENTRY(xen_sysenter_target)
 	undo_xen_syscall
-	jmp ia32_sysenter_target
+	jmp entry_SYSENTER_compat
 ENDPROC(xen_sysenter_target)

 #else /* !CONFIG_IA32_EMULATION */
scripts/checksyscalls.sh:

@@ -212,5 +212,5 @@ EOF
 )
 }

-(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \
+(ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
 $* -E -x c - > /dev/null
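This pipeline runs one generated C probe per table entry through the target compiler's preprocessor. In rough shape (simplified from what syscall_list emits; "fork" is just an example), each probe looks like:

#include <asm/unistd.h>

/* emitted once per entry in syscall_32.tbl */
#if !defined(__NR_fork) && !defined(__IGNORE_fork)
#warning syscall fork not implemented
#endif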