Rework of the X86 irq stack handling

Merge tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 irq entry updates from Thomas Gleixner:
 "The irq stack switching was moved out of the ASM entry code in the
  course of the entry code consolidation. It ended up being suboptimal
  in various ways.

  This reworks the X86 irq stack handling:

   - Make the stack switching inline so the stack pointer manipulation
     is no longer at an easy-to-find place.

   - Get rid of the unnecessary indirect call.

   - Avoid the double stack switching in interrupt return and reuse the
     interrupt stack for softirq handling.

   - An objtool fix for CONFIG_FRAME_POINTER=y builds where it got
     confused about the stack pointer manipulation"

* tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Fix stack-swizzle for FRAME_POINTER=y
  um: Enforce the usage of asm-generic/softirq_stack.h
  x86/softirq/64: Inline do_softirq_own_stack()
  softirq: Move do_softirq_own_stack() to generic asm header
  softirq: Move __ARCH_HAS_DO_SOFTIRQ to Kconfig
  x86: Select CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
  x86/softirq: Remove indirection in do_softirq_own_stack()
  x86/entry: Use run_sysvec_on_irqstack_cond() for XEN upcall
  x86/entry: Convert device interrupts to inline stack switching
  x86/entry: Convert system vectors to irq stack macro
  x86/irq: Provide macro for inlining irq stack switching
  x86/apic: Split out spurious handling code
  x86/irq/64: Adjust the per CPU irq stack pointer by 8
  x86/irq: Sanitize irq stack tracking
  x86/entry: Fix instrumentation annotation
Commit: 29c395c77a
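The core of the rework, condensed from the irq_stack.h hunk below: the switch to the irq stack is now emitted inline at each call site instead of bouncing through the out-of-line asm_call_on_stack trampoline and its indirect CALL_NOSPEC. A minimal sketch of the new sequence (simplified from the real call_on_irqstack() macro further down; not a drop-in patch):

	register void *tos asm("r11");

	tos = __this_cpu_read(hardirq_stack_ptr);   /* top-most slot of the irq stack */
	asm_inline volatile(
		"movq	%%rsp, (%[tos])	\n"	/* 1) link back to the old stack  */
		"movq	%[tos], %%rsp	\n"	/* 2) switch to the irq stack     */
		"call	%P[__func]	\n"	/* 3) direct call, no retpoline   */
		"popq	%%rsp		\n"	/* 4) restore the original RSP    */
		: "+r" (tos), ASM_CALL_CONSTRAINT
		: [__func] "i" (func), [tos] "r" (tos)
		: "cc", "rax", "rcx", "rdx", "rsi", "rdi",
		  "r8", "r9", "r10", "memory");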
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -821,6 +821,12 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
 	  This spares a stack switch and improves cache usage on softirq
 	  processing.
 
+config HAVE_SOFTIRQ_ON_OWN_STACK
+	bool
+	help
+	  Architecture provides a function to run __do_softirq() on a
+	  separate stack.
+
 config PGTABLE_LEVELS
 	int
 	default 2
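The new Kconfig symbol replaces the ad hoc __ARCH_HAS_DO_SOFTIRQ defines removed from the per-arch headers below. Roughly how it is consumed: the first block is the asm-generic/softirq_stack.h header added later in this diff, while the caller shown after it is a simplified sketch of a generic call site, not the exact kernel/softirq.c code:

	#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
	void do_softirq_own_stack(void);	/* arch switches stacks itself */
	#else
	static inline void do_softirq_own_stack(void)
	{
		__do_softirq();			/* run on the current stack */
	}
	#endif

	/* simplified caller: process pending softirqs on the right stack */
	if (local_softirq_pending())
		do_softirq_own_stack();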
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -63,6 +63,7 @@ config PARISC
 	select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
 	select SET_FS
 
 	help
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -12,10 +12,6 @@
 #include <linux/threads.h>
 #include <linux/irq.h>
 
-#ifdef CONFIG_IRQSTACKS
-#define __ARCH_HAS_DO_SOFTIRQ
-#endif
-
 typedef struct {
 	unsigned int __softirq_pending;
 	unsigned int kernel_stack_usage;
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <asm/io.h>
 
+#include <asm/softirq_stack.h>
 #include <asm/smp.h>
 #include <asm/ldcw.h>
 
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -235,6 +235,7 @@ config PPC
 	select MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HAVE_IRQ_TIME_ACCOUNTING
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -37,8 +37,6 @@ extern int distribute_irqs;
 
 struct pt_regs;
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 /*
  * Per-cpu stacks for handling critical, debug and machine check
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
 #include <asm/livepatch.h>
 #include <asm/asm-prototypes.h>
 #include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -184,6 +184,7 @@ config S390
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HAVE_VIRT_CPU_ACCOUNTING_IDLE
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,7 +18,6 @@
 #define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
 
 #define __ARCH_IRQ_STAT
-#define __ARCH_HAS_DO_SOFTIRQ
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED
 
 static inline void ack_bad_irq(unsigned int irq)
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -28,6 +28,7 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 #include <asm/stacktrace.h>
+#include <asm/softirq_stack.h>
 #include "entry.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -54,6 +54,7 @@ config SUPERH
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_UID16
+	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_FORCED_THREADING
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -51,7 +51,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
 #ifdef CONFIG_IRQSTACKS
 extern void irq_ctx_init(int cpu);
 extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
 #else
 # define irq_ctx_init(cpu) do { } while (0)
 # define irq_ctx_exit(cpu) do { } while (0)
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -20,6 +20,7 @@
 #include <linux/uaccess.h>
 #include <asm/thread_info.h>
 #include <cpu/mmu_context.h>
+#include <asm/softirq_stack.h>
 
 atomic_t irq_err_count;
 
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -96,6 +96,7 @@ config SPARC64
 	select ARCH_HAS_PTE_SPECIAL
 	select PCI_DOMAINS if PCI
 	select ARCH_HAS_GIGANTIC_PAGE
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -93,7 +93,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
-#define __ARCH_HAS_DO_SOFTIRQ
 
 #define NO_IRQ		0xffffffff
 
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -42,6 +42,7 @@
 #include <asm/head.h>
 #include <asm/hypervisor.h>
 #include <asm/cacheflush.h>
+#include <asm/softirq_stack.h>
 
 #include "entry.h"
 #include "cpumap.h"
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += param.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
+generic-y += softirq_stack.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -191,6 +191,7 @@ config X86
 	select HAVE_HW_BREAKPOINT
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
@@ -223,6 +224,7 @@ config X86
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_FUNCTION_ARG_ACCESS_API
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
 	select HAVE_STACK_VALIDATION if X86_64
 	select HAVE_STATIC_CALL
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -249,30 +249,23 @@ static __always_inline bool get_and_clear_inhcall(void) { return false; }
 static __always_inline void restore_inhcall(bool inhcall) { }
 #endif
 
-static void __xen_pv_evtchn_do_upcall(void)
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
-	irq_enter_rcu();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	inc_irq_stat(irq_hv_callback_count);
 
 	xen_hvm_evtchn_do_upcall();
 
-	irq_exit_rcu();
+	set_irq_regs(old_regs);
 }
 
 __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs;
+	irqentry_state_t state = irqentry_enter(regs);
 	bool inhcall;
-	irqentry_state_t state;
 
-	state = irqentry_enter(regs);
-	old_regs = set_irq_regs(regs);
+	instrumentation_begin();
+	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 
-	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
-
-	set_irq_regs(old_regs);
-
 	inhcall = get_and_clear_inhcall();
 	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -754,47 +754,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 SYM_CODE_END(.Lbad_gs)
 	.previous
 
-/*
- * rdi: New stack pointer points to the top word of the stack
- * rsi: Function pointer
- * rdx: Function argument (can be NULL if none)
- */
-SYM_FUNC_START(asm_call_on_stack)
-SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
-SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
-	/*
-	 * Save the frame pointer unconditionally. This allows the ORC
-	 * unwinder to handle the stack switch.
-	 */
-	pushq	%rbp
-	mov	%rsp, %rbp
-
-	/*
-	 * The unwinder relies on the word at the top of the new stack
-	 * page linking back to the previous RSP.
-	 */
-	mov	%rsp, (%rdi)
-	mov	%rdi, %rsp
-	/* Move the argument to the right place */
-	mov	%rdx, %rdi
-
-1:
-	.pushsection .discard.instr_begin
-	.long 1b - .
-	.popsection
-
-	CALL_NOSPEC	rsi
-
-2:
-	.pushsection .discard.instr_end
-	.long 2b - .
-	.popsection
-
-	/* Restore the previous stack pointer from RBP. */
-	leaveq
-	ret
-SYM_FUNC_END(asm_call_on_stack)
-
 #ifdef CONFIG_XEN_PV
 /*
  * A note on the "critical region" in our callback handler.
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -187,23 +187,22 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
  * has to be done in the function body if necessary.
  */
 #define DEFINE_IDTENTRY_IRQ(func)					\
-static __always_inline void __##func(struct pt_regs *regs, u8 vector);	\
+static void __##func(struct pt_regs *regs, u32 vector);		\
 									\
 __visible noinstr void func(struct pt_regs *regs,			\
 			    unsigned long error_code)			\
 {									\
 	irqentry_state_t state = irqentry_enter(regs);			\
+	u32 vector = (u32)(u8)error_code;				\
 									\
 	instrumentation_begin();					\
-	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
-	__##func (regs, (u8)error_code);				\
-	irq_exit_rcu();							\
+	run_irq_on_irqstack_cond(__##func, regs, vector);		\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
 }									\
 									\
-static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+static noinline void __##func(struct pt_regs *regs, u32 vector)
 
 /**
  * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
@@ -237,10 +236,8 @@ __visible noinstr void func(struct pt_regs *regs)			\
 	irqentry_state_t state = irqentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
-	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
 	run_sysvec_on_irqstack_cond(__##func, regs);			\
-	irq_exit_rcu();							\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
 }									\
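Hand-expanding DEFINE_IDTENTRY_IRQ(spurious_interrupt) (used in the apic.c hunk below) after this change gives, roughly (whitespace and annotations simplified, for illustration only):

	static void __spurious_interrupt(struct pt_regs *regs, u32 vector);

	__visible noinstr void spurious_interrupt(struct pt_regs *regs,
						  unsigned long error_code)
	{
		irqentry_state_t state = irqentry_enter(regs);
		u32 vector = (u32)(u8)error_code;  /* vector arrives as error_code */

		instrumentation_begin();
		kvm_set_cpu_l1tf_flush_l1d();
		/* irq_enter_rcu()/irq_exit_rcu() now run inside the asm sequence */
		run_irq_on_irqstack_cond(__spurious_interrupt, regs, vector);
		instrumentation_end();
		irqentry_exit(regs, state);
	}

	static noinline void __spurious_interrupt(struct pt_regs *regs, u32 vector)
	/* { handler body follows } */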
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,8 +25,6 @@ static inline int irq_canonicalize(int irq)
 
 extern int irq_init_percpu_irqstack(unsigned int cpu);
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 struct irq_desc;
 
 extern void fixup_irqs(void);
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -7,100 +7,217 @@
 #include <asm/processor.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline bool irqstack_active(void)
-{
-	return __this_cpu_read(irq_count) != -1;
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
-void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
-			      struct pt_regs *regs);
-void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
-			   struct irq_desc *desc);
-
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_add(irq_count, 1);
-	asm_call_on_stack(tos - 8, func, NULL);
-	__this_cpu_sub(irq_count, 1);
-}
-
-static __always_inline void
-__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
-			 struct pt_regs *regs)
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_add(irq_count, 1);
-	asm_call_sysvec_on_stack(tos - 8, func, regs);
-	__this_cpu_sub(irq_count, 1);
-}
-
-static __always_inline void
-__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
-		      struct irq_desc *desc)
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_add(irq_count, 1);
-	asm_call_irq_on_stack(tos - 8, func, desc);
-	__this_cpu_sub(irq_count, 1);
-}
+/*
+ * Macro to inline switching to an interrupt stack and invoking function
+ * calls from there. The following rules apply:
+ *
+ * - Ordering:
+ *
+ *   1. Write the stack pointer into the top most place of the irq
+ *	stack. This ensures that the various unwinders can link back to the
+ *	original stack.
+ *
+ *   2. Switch the stack pointer to the top of the irq stack.
+ *
+ *   3. Invoke whatever needs to be done (@asm_call argument)
+ *
+ *   4. Pop the original stack pointer from the top of the irq stack
+ *	which brings it back to the original stack where it left off.
+ *
+ * - Function invocation:
+ *
+ *   To allow flexible usage of the macro, the actual function code including
+ *   the store of the arguments in the call ABI registers is handed in via
+ *   the @asm_call argument.
+ *
+ * - Local variables:
+ *
+ *   @tos:
+ *	The @tos variable holds a pointer to the top of the irq stack and
+ *	_must_ be allocated in a non-callee saved register as this is a
+ *	restriction coming from objtool.
+ *
+ *	Note, that (tos) is both in input and output constraints to ensure
+ *	that the compiler does not assume that R11 is left untouched in
+ *	case this macro is used in some place where the per cpu interrupt
+ *	stack pointer is used again afterwards
+ *
+ * - Function arguments:
+ *	The function argument(s), if any, have to be defined in register
+ *	variables at the place where this is invoked. Storing the
+ *	argument(s) in the proper register(s) is part of the @asm_call
+ *
+ * - Constraints:
+ *
+ *   The constraints have to be done very carefully because the compiler
+ *   does not know about the assembly call.
+ *
+ *   output:
+ *     As documented already above the @tos variable is required to be in
+ *     the output constraints to make the compiler aware that R11 cannot be
+ *     reused after the asm() statement.
+ *
+ *     For builds with CONFIG_UNWIND_FRAME_POINTER ASM_CALL_CONSTRAINT is
+ *     required as well as this prevents certain creative GCC variants from
+ *     misplacing the ASM code.
+ *
+ *   input:
+ *    - func:
+ *	  Immediate, which tells the compiler that the function is referenced.
+ *
+ *    - tos:
+ *	  Register. The actual register is defined by the variable declaration.
+ *
+ *    - function arguments:
+ *	  The constraints are handed in via the 'argconstr' argument list. They
+ *	  describe the register arguments which are used in @asm_call.
+ *
+ *   clobbers:
+ *     Function calls can clobber anything except the callee-saved
+ *     registers. Tell the compiler.
+ */
+#define call_on_irqstack(func, asm_call, argconstr...)			\
+{									\
+	register void *tos asm("r11");					\
+									\
+	tos = ((void *)__this_cpu_read(hardirq_stack_ptr));		\
+									\
+	asm_inline volatile(						\
+	"movq	%%rsp, (%[tos])				\n"		\
+	"movq	%[tos], %%rsp				\n"		\
+									\
+	asm_call							\
+									\
+	"popq	%%rsp					\n"		\
+									\
+	: "+r" (tos), ASM_CALL_CONSTRAINT				\
+	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
+	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
+	  "memory"							\
+	);								\
+}
+
+/* Macros to assert type correctness for run_*_on_irqstack macros */
+#define assert_function_type(func, proto)				\
+	static_assert(__builtin_types_compatible_p(typeof(&func), proto))
+
+#define assert_arg_type(arg, proto)					\
+	static_assert(__builtin_types_compatible_p(typeof(arg), proto))
+
+/*
+ * Macro to invoke system vector and device interrupt C handlers.
+ */
+#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
+{									\
+	/*								\
+	 * User mode entry and interrupt on the irq stack do not	\
+	 * switch stacks. If from user mode the task stack is empty.	\
+	 */								\
+	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
+		irq_enter_rcu();					\
+		func(c_args);						\
+		irq_exit_rcu();						\
+	} else {							\
+		/*							\
+		 * Mark the irq stack inuse _before_ and unmark _after_	\
+		 * switching stacks. Interrupts are disabled in both	\
+		 * places. Invoke the stack switch macro with the call	\
+		 * sequence which matches the above direct invocation.	\
+		 */							\
+		__this_cpu_write(hardirq_stack_inuse, true);		\
+		call_on_irqstack(func, asm_call, constr);		\
+		__this_cpu_write(hardirq_stack_inuse, false);		\
+	}								\
+}
+
+/*
+ * Function call sequence for __call_on_irqstack() for system vectors.
+ *
+ * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
+ * mechanism because these functions are global and cannot be optimized out
+ * when compiling a particular source file which uses one of these macros.
+ *
+ * The argument (regs) does not need to be pushed or stashed in a callee
+ * saved register to be safe vs. the irq_enter_rcu() call because the
+ * clobbers already prevent the compiler from storing it in a callee
+ * clobbered register. As the compiler has to preserve @regs for the final
+ * call to idtentry_exit() anyway, it's likely that it does not cause extra
+ * effort for this asm magic.
+ */
+#define ASM_CALL_SYSVEC							\
+	"call irq_enter_rcu				\n"		\
+	"movq	%[arg1], %%rdi				\n"		\
+	"call %P[__func]				\n"		\
+	"call irq_exit_rcu				\n"
+
+#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)
+
+#define run_sysvec_on_irqstack_cond(func, regs)				\
+{									\
+	assert_function_type(func, void (*)(struct pt_regs *));	\
+	assert_arg_type(regs, struct pt_regs *);			\
+									\
+	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
+			      SYSVEC_CONSTRAINTS, regs);		\
+}
+
+/*
+ * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
+ * @regs and @vector in callee saved registers.
+ */
+#define ASM_CALL_IRQ							\
+	"call irq_enter_rcu				\n"		\
+	"movq	%[arg1], %%rdi				\n"		\
+	"movl	%[arg2], %%esi				\n"		\
+	"call %P[__func]				\n"		\
+	"call irq_exit_rcu				\n"
+
+#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" (vector)
+
+#define run_irq_on_irqstack_cond(func, regs, vector)			\
+{									\
+	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
+	assert_arg_type(regs, struct pt_regs *);			\
+	assert_arg_type(vector, u32);					\
+									\
+	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
+			      IRQ_CONSTRAINTS, regs, vector);		\
+}
+
+#define ASM_CALL_SOFTIRQ						\
+	"call %P[__func]				\n"
+
+/*
+ * Macro to invoke __do_softirq on the irq stack. This is only called from
+ * task context when bottom halfs are about to be reenabled and soft
+ * interrupts are pending to be processed. The interrupt stack cannot be in
+ * use here.
+ */
+#define do_softirq_own_stack()						\
+{									\
+	__this_cpu_write(hardirq_stack_inuse, true);			\
+	call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);		\
+	__this_cpu_write(hardirq_stack_inuse, false);			\
+}
 
 #else /* CONFIG_X86_64 */
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
-static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
-					    struct pt_regs *regs) { }
-static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
-					 struct irq_desc *desc) { }
+/* System vector handlers always run on the stack they interrupted. */
+#define run_sysvec_on_irqstack_cond(func, regs)				\
+{									\
+	irq_enter_rcu();						\
+	func(regs);							\
+	irq_exit_rcu();							\
+}
+
+/* Switches to the irq stack within func() */
+#define run_irq_on_irqstack_cond(func, regs, vector)			\
+{									\
+	irq_enter_rcu();						\
+	func(regs, vector);						\
+	irq_exit_rcu();							\
+}
+
 #endif /* !CONFIG_X86_64 */
 
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
-	if (IS_ENABLED(CONFIG_X86_32))
-		return false;
-	if (!regs)
-		return !irqstack_active();
-	return !user_mode(regs) && !irqstack_active();
-}
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
-						 struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(func);
-	else
-		func();
-}
-
-static __always_inline void
-run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
-			    struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_sysvec_on_irqstack(func, regs);
-	else
-		func(regs);
-}
-
-static __always_inline void
-run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
-			 struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_irq_on_irqstack(func, desc);
-	else
-		func(desc);
-}
-
 #endif
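The same save/switch/call/pop trick can be exercised outside the kernel. A self-contained user-space analogue (a sketch assuming x86-64 GCC or Clang; unlike the kernel, which uses an 8-byte-aligned top slot, this demo keeps RSP 16-byte aligned at the call to satisfy the userspace ABI):

	#include <stdio.h>

	/* Runs on the alternate stack; a local's address shows where we are. */
	static void on_alt_stack(void)
	{
		int marker;
		printf("alt stack:  %p\n", (void *)&marker);
	}

	int main(void)
	{
		static char stack[64 * 1024] __attribute__((aligned(16)));
		/* Top slot that receives the old RSP. The -16 keeps RSP
		 * 16-byte aligned at the call (the kernel uses -8). */
		void *tos = stack + sizeof(stack) - 16;
		int marker;

		printf("main stack: %p\n", (void *)&marker);

		asm volatile(
			"movq	%%rsp, (%[tos])	\n\t"	/* link back to old stack */
			"movq	%[tos], %%rsp	\n\t"	/* switch stacks          */
			"call	*%[func]	\n\t"
			"popq	%%rsp		\n\t"	/* restore the old RSP    */
			: [tos] "+r" (tos)
			: [func] "r" (on_alt_stack)
			: "cc", "rax", "rcx", "rdx", "rsi", "rdi",
			  "r8", "r9", "r10", "r11", "memory");
		return 0;
	}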
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -426,8 +426,6 @@ struct irq_stack {
 	char		stack[IRQ_STACK_SIZE];
 } __aligned(IRQ_STACK_SIZE);
 
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
 #else
@@ -454,7 +452,8 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(void *, hardirq_stack_ptr);
+DECLARE_PER_CPU(bool, hardirq_stack_inuse);
 extern asmlinkage void ignore_sysret(void);
 
 /* Save actual FS/GS selectors and bases to current->thread */
@@ -473,9 +472,9 @@ struct stack_canary {
 };
 DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
-/* Per CPU softirq stack pointer */
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
 DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
-#endif	/* X86_64 */
+#endif	/* !X86_64 */
 
 extern unsigned int fpu_kernel_xstate_size;
 extern unsigned int fpu_user_xstate_size;
--- /dev/null
+++ b/arch/x86/include/asm/softirq_stack.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SOFTIRQ_STACK_H
+#define _ASM_X86_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_X86_64
+# include <asm/irq_stack.h>
+#else
+# include <asm-generic/softirq_stack.h>
+#endif
+
+#endif
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2138,18 +2138,11 @@ void __init register_lapic_address(unsigned long address)
  * Local APIC interrupts
  */
 
-/**
- * spurious_interrupt - Catch all for interrupts raised on unused vectors
- * @regs:	Pointer to pt_regs on stack
- * @vector:	The vector number
- *
- * This is invoked from ASM entry code to catch all interrupts which
- * trigger on an entry which is routed to the common_spurious idtentry
- * point.
- *
- * Also called from sysvec_spurious_apic_interrupt().
+/*
+ * Common handling code for spurious_interrupt and spurious_vector entry
+ * points below. No point in allowing the compiler to inline it twice.
  */
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+static noinline void handle_spurious_interrupt(u8 vector)
 {
 	u32 v;
 
@@ -2184,9 +2177,23 @@
 	trace_spurious_apic_exit(vector);
 }
 
+/**
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs:	Pointer to pt_regs on stack
+ * @vector:	The vector number
+ *
+ * This is invoked from ASM entry code to catch all interrupts which
+ * trigger on an entry which is routed to the common_spurious idtentry
+ * point.
+ */
+DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+{
+	handle_spurious_interrupt(vector);
+}
+
 DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
 {
-	__spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
+	handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
 }
 
 /*
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1742,8 +1742,8 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(void *, hardirq_stack_ptr);
+DEFINE_PER_CPU(bool, hardirq_stack_inuse);
 
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -129,11 +129,20 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
 	unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
-	unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
+	unsigned long *begin;
 
 	/*
-	 * This is a software stack, so 'end' can be a valid stack pointer.
-	 * It just means the stack is empty.
+	 * @end points directly to the top most stack entry to avoid a -8
+	 * adjustment in the stack switch hotpath. Adjust it back before
+	 * calculating @begin.
+	 */
+	end++;
+	begin = end - (IRQ_STACK_SIZE / sizeof(long));
+
+	/*
+	 * Due to the switching logic RSP can never be == @end because the
+	 * final operation is 'popq %rsp' which means after that RSP points
+	 * to the original stack and not to @end.
 	 */
 	if (stack < begin || stack >= end)
 		return false;
@@ -143,8 +152,9 @@ static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 	info->end = end;
 
 	/*
-	 * The next stack pointer is the first thing pushed by the entry code
-	 * after switching to the irq stack.
+	 * The next stack pointer is stored at the top of the irq stack
+	 * before switching to the irq stack. Actual stack entries are all
+	 * below that.
 	 */
 	info->next_sp = (unsigned long *)*(end - 1);
 
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -228,7 +228,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 					struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_X86_64))
-		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+		generic_handle_irq_desc(desc);
 	else
 		__handle_irq(desc, regs);
 }
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -22,6 +22,7 @@
 
 #include <asm/apic.h>
 #include <asm/nospec-branch.h>
+#include <asm/softirq_stack.h>
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -20,6 +20,7 @@
 #include <linux/sched/task_stack.h>
 
 #include <asm/cpu_entry_area.h>
+#include <asm/softirq_stack.h>
 #include <asm/irq_stack.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -48,7 +49,8 @@ static int map_irq_stack(unsigned int cpu)
 	if (!va)
 		return -ENOMEM;
 
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+	/* Store actual TOS to avoid adjustment in the hotpath */
+	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #else
@@ -60,7 +62,8 @@ static int map_irq_stack(unsigned int cpu)
 {
 	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
 
-	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+	/* Store actual TOS to avoid adjustment in the hotpath */
+	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
 	return 0;
 }
 #endif
@@ -71,8 +74,3 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 		return 0;
 	return map_irq_stack(cpu);
 }
-
-void do_softirq_own_stack(void)
-{
-	run_on_irqstack_cond(__do_softirq, NULL);
-}
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -539,7 +539,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(irq_count) != -1);
+		     this_cpu_read(hardirq_stack_inuse));
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_fpu, cpu);
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -50,6 +50,7 @@ mandatory-y += sections.h
 mandatory-y += serial.h
 mandatory-y += shmparam.h
 mandatory-y += simd.h
+mandatory-y += softirq_stack.h
 mandatory-y += switch_to.h
 mandatory-y += timex.h
 mandatory-y += tlbflush.h
--- /dev/null
+++ b/include/asm-generic/softirq_stack.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+#define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+	__do_softirq();
+}
+#endif
+
+#endif
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -569,15 +569,6 @@ struct softirq_action
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
-#else
-static inline void do_softirq_own_stack(void)
-{
-	__do_softirq();
-}
-#endif
-
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,8 @@
 #include <linux/tick.h>
 #include <linux/irq.h>
 
+#include <asm/softirq_stack.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2084,6 +2084,20 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
 			}
 		}
 
+		else if (op->dest.reg == CFI_SP &&
+			 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
+			 cfi->vals[op->src.reg].offset == cfa->offset) {
+
+			/*
+			 * The same stack swizzle case 2) as above. But
+			 * because we can't change cfa->base, case 3)
+			 * will become a regular POP. Pretend we're a
+			 * PUSH so things don't go unbalanced.
+			 */
+			cfi->stack_size += 8;
+		}
+
 		break;
 
 	case OP_SRC_ADD:
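For reference, the "stack swizzle" objtool now tracks is the instruction sequence emitted by call_on_irqstack() above (sketch):

	movq	%rsp, (%r11)	/* case 1: save RSP in the top slot of the irq stack */
	movq	%r11, %rsp	/* case 2: swizzle RSP onto the irq stack */
	...
	popq	%rsp		/* case 3: restore the original RSP */

With CONFIG_FRAME_POINTER=y the CFA stays based on RBP and cannot follow the swizzle, so case 3 is seen as a regular POP; the hunk above compensates by accounting case 2 as a PUSH (stack_size += 8) so the bookkeeping stays balanced.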