Merge remote-tracking branch 'arm64/for-next/fixes' into for-next/core

* arm64/for-next/fixes: (26 commits)
  arm64: mte: fix prctl(PR_GET_TAGGED_ADDR_CTRL) if TCF0=NONE
  arm64: mte: Fix typo in macro definition
  arm64: entry: fix EL1 debug transitions
  arm64: entry: fix NMI {user, kernel}->kernel transitions
  arm64: entry: fix non-NMI kernel<->kernel transitions
  arm64: ptrace: prepare for EL1 irq/rcu tracking
  arm64: entry: fix non-NMI user<->kernel transitions
  arm64: entry: move el1 irq/nmi logic to C
  arm64: entry: prepare ret_to_user for function call
  arm64: entry: move enter_from_user_mode to entry-common.c
  arm64: entry: mark entry code as noinstr
  arm64: mark idle code as noinstr
  arm64: syscall: exit userspace before unmasking exceptions
  arm64: pgtable: Ensure dirty bit is preserved across pte_wrprotect()
  arm64: pgtable: Fix pte_accessible()
  ACPI/IORT: Fix doc warnings in iort.c
  arm64/fpsimd: add <asm/insn.h> to <asm/kprobes.h> to fix fpsimd build
  arm64: cpu_errata: Apply Erratum 845719 to KRYO2XX Silver
  arm64: proton-pack: Add KRYO2XX silver CPUs to spectre-v2 safe-list
  arm64: kpti: Add KRYO2XX gold/silver CPU cores to kpti safelist
  ...

# Conflicts:
#	arch/arm64/include/asm/exception.h
#	arch/arm64/kernel/sdei.c
Catalin Marinas 2020-12-09 18:04:55 +00:00
Parents d45056ad73 929c1f3384
Commit d889797530
26 changed files with 304 additions and 210 deletions

View file

@ -268,6 +268,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
* CPU feature detected at boot time based on feature of one or more CPUs.
* All possible conflicts for a late CPU are ignored.
* NOTE: this means that a late CPU with the feature will *not* cause the
* capability to be advertised by cpus_have_*cap()!
*/
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \

View file

@ -86,6 +86,8 @@
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@ -116,6 +118,8 @@
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
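For orientation, the MIDR_CPU_MODEL() values above simply pack the MIDR_EL1 implementer and part-number fields together. A standalone sketch of that packing (field layout per the architecture; the helper and constants here are illustrative, not copied from cputype.h):

#include <stdio.h>
#include <stdint.h>

/* Assumed MIDR_EL1 layout: implementer[31:24], variant[23:20],
 * architecture[19:16], partnum[15:4], revision[3:0]. */
static uint32_t midr_cpu_model(uint32_t implementer, uint32_t partnum)
{
	return (implementer << 24) | (0xf << 16) | (partnum << 4);
}

int main(void)
{
	/* 0x51 is the Qualcomm implementer code; 0x800 is KRYO_2XX_GOLD above. */
	printf("MIDR_QCOM_KRYO_2XX_GOLD ~ 0x%08x\n", midr_cpu_model(0x51, 0x800));
	return 0;
}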

View file

@ -128,6 +128,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
{
unsigned long flags = regs->pstate & DAIF_MASK;
if (interrupts_enabled(regs))
trace_hardirqs_on();
/*
* We can't use local_daif_restore(regs->pstate) here as
* system_has_prio_mask_debugging() won't restore the I bit if it can

View file

@ -31,7 +31,12 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
void arm64_enter_nmi(struct pt_regs *regs);
void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);

View file

@ -115,8 +115,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@ -124,9 +122,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
* Note that we can't make any assumptions based on the state of the access
* flag, since ptep_clear_flush_young() elides a DSB when invalidating the
* TLB.
*/
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@ -164,13 +165,6 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
return pmd;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@ -196,6 +190,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
/*
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
* clear), set the PTE_DIRTY bit.
*/
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
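The pte_wrprotect() change above hinges on the hardware dirty state that DBM tracks. A rough sketch of the check its comment refers to (the real pte_hw_dirty() helper lives elsewhere in pgtable.h and may differ in detail):

/* Sketch only: with hardware dirty-bit management (DBM), a PTE counts as
 * hardware-dirty when it is writable (PTE_WRITE/DBM set) and hardware has
 * already cleared PTE_RDONLY on the first write to the page. */
static inline bool pte_hw_dirty_sketch(pte_t pte)
{
	return pte_write(pte) && !(pte_val(pte) & PTE_RDONLY);
}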
@ -846,12 +854,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
/*
* If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
* clear), set the PTE_DIRTY bit.
*/
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));

View file

@ -7,6 +7,8 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
#include <asm/insn.h>
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);

View file

@ -197,6 +197,10 @@ struct pt_regs {
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
u64 pmr_save;
u64 stackframe[2];
/* Only valid for some EL1 exceptions. */
u64 lockdep_hardirqs;
u64 exit_rcu;
};
static inline bool in_syscall(struct pt_regs const *regs)

View file

@ -993,7 +993,7 @@
#define SYS_TFSR_EL1_TF0_SHIFT 0
#define SYS_TFSR_EL1_TF1_SHIFT 1
#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
#define SYS_TFSR_EL1_TF1 (UK(2) << SYS_TFSR_EL1_TF1_SHIFT)
#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))

View file

@ -299,6 +299,8 @@ static const struct midr_range erratum_845719_list[] = {
MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
/* Brahma-B53 r0p[0] */
MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
/* Kryo2XX Silver rAp4 */
MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
{},
};
#endif

View file

@ -1333,6 +1333,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }

View file

@ -17,39 +17,163 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>
static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
/*
* This is intended to match the logic in irqentry_enter(), handling the kernel
* mode transitions only.
*/
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
regs->exit_rcu = false;
if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter();
trace_hardirqs_off_finish();
regs->exit_rcu = true;
return;
}
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
}
/*
* This is intended to match the logic in irqentry_exit(), handling the kernel
* mode transitions only, and with preemption handled elsewhere.
*/
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
rcu_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
return;
}
trace_hardirqs_on();
} else {
if (regs->exit_rcu)
rcu_irq_exit();
}
}
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
__nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0);
lockdep_hardirq_enter();
rcu_nmi_enter();
trace_hardirqs_off_finish();
ftrace_nmi_enter();
}
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
ftrace_nmi_exit();
if (restore) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
}
rcu_nmi_exit();
lockdep_hardirq_exit();
if (restore)
lockdep_hardirqs_on(CALLER_ADDR0);
__nmi_exit();
}
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
else
enter_from_kernel_mode(regs);
}
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
else
exit_to_kernel_mode(regs);
}
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_mem_abort(far, esr, regs);
local_daif_mask();
exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_abort);
static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
local_daif_mask();
exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_pc);
static void notrace el1_undef(struct pt_regs *regs)
static void noinstr el1_undef(struct pt_regs *regs)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_undefinstr(regs);
local_daif_mask();
exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_undef);
static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_inv);
static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_nmi_enter();
trace_hardirqs_off_finish();
}
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
if (restore) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
}
rcu_nmi_exit();
if (restore)
lockdep_hardirqs_on(CALLER_ADDR0);
}
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@ -61,18 +185,21 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
arm64_enter_el1_dbg(regs);
do_debug_exception(far, esr, regs);
arm64_exit_el1_dbg(regs);
}
NOKPROBE_SYMBOL(el1_dbg);
static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_ptrauth_fault(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
NOKPROBE_SYMBOL(el1_fpac);
asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@ -105,19 +232,33 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
el1_inv(regs, esr);
}
}
NOKPROBE_SYMBOL(el1_sync_handler);
static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
trace_hardirqs_off_finish();
}
asmlinkage void noinstr exit_to_user_mode(void)
{
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
user_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);
}
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_da);
static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@ -129,90 +270,80 @@ static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(far))
arm64_apply_bp_hardening();
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_ia);
static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_acc);
static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sve_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_sve_acc);
static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_exc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_exc);
static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sysinstr(esr, regs);
}
NOKPROBE_SYMBOL(el0_sys);
static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_pc);
static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(regs->sp, esr, regs);
}
NOKPROBE_SYMBOL(el0_sp);
static void notrace el0_undef(struct pt_regs *regs)
static void noinstr el0_undef(struct pt_regs *regs)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el0_undef);
static void notrace el0_bti(struct pt_regs *regs)
static void noinstr el0_bti(struct pt_regs *regs)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_bti(regs);
}
NOKPROBE_SYMBOL(el0_bti);
static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
bad_el0_sync(regs, 0, esr);
}
NOKPROBE_SYMBOL(el0_inv);
static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
unsigned long far = read_sysreg(far_el1);
@ -220,30 +351,28 @@ static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
user_exit_irqoff();
enter_from_user_mode();
do_debug_exception(far, esr, regs);
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
NOKPROBE_SYMBOL(el0_dbg);
static void notrace el0_svc(struct pt_regs *regs)
static void noinstr el0_svc(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
do_el0_svc(regs);
}
NOKPROBE_SYMBOL(el0_svc);
static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_ptrauth_fault(regs, esr);
}
NOKPROBE_SYMBOL(el0_fpac);
asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@ -295,27 +424,25 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
NOKPROBE_SYMBOL(el0_sync_handler);
#ifdef CONFIG_COMPAT
static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
user_exit_irqoff();
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_cp15instr(esr, regs);
}
NOKPROBE_SYMBOL(el0_cp15);
static void notrace el0_svc_compat(struct pt_regs *regs)
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
enter_from_user_mode();
do_el0_svc_compat(regs);
}
NOKPROBE_SYMBOL(el0_svc_compat);
asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@ -358,5 +485,4 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
NOKPROBE_SYMBOL(el0_sync_compat_handler);
#endif /* CONFIG_COMPAT */

View file

@ -30,18 +30,18 @@
#include <asm/unistd.h>
/*
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl enter_from_user_mode
#endif
.endm
.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
bl context_tracking_user_enter
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
#endif
.endm
@ -286,9 +286,6 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
alternative_else_nop_endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
.endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
@ -625,16 +622,8 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
#ifdef CONFIG_ARM64_PSEUDO_NMI
test_irqs_unmasked res=x0, pmr=x20
cbz x0, 1f
bl asm_nmi_enter
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
mov x0, sp
bl enter_el1_irq_or_nmi
irq_handler
@ -653,26 +642,8 @@ alternative_else_nop_endif
1:
#endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
/*
* When using IRQ priority masking, we can get spurious interrupts while
* PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
* section with interrupts disabled. Skip tracing in those cases.
*/
test_irqs_unmasked res=x0, pmr=x20
cbz x0, 1f
bl asm_nmi_exit
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
test_irqs_unmasked res=x0, pmr=x20
cbnz x0, 1f
#endif
bl trace_hardirqs_on
1:
#endif
mov x0, sp
bl exit_el1_irq_or_nmi
kernel_exit 1
SYM_CODE_END(el1_irq)
@ -714,21 +685,14 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
ct_user_exit_irqoff
user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
#endif
b ret_to_user
SYM_CODE_END(el0_irq)
@ -747,7 +711,7 @@ SYM_CODE_START_LOCAL(el0_error)
el0_error_naked:
mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
ct_user_exit_irqoff
user_exit_irqoff
enable_dbg
mov x0, sp
mov x1, x25
@ -762,13 +726,17 @@ SYM_CODE_END(el0_error)
SYM_CODE_START_LOCAL(ret_to_user)
disable_daif
gic_prio_kentry_setup tmp=x3
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ldr x19, [tsk, #TSK_TI_FLAGS]
and x2, x19, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
user_enter_irqoff
/* Ignore asynchronous tag check faults in the uaccess routines */
clear_mte_async_tcf
enable_step_tsk x1, x2
enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
#endif
@ -779,11 +747,9 @@ finish_ret_to_user:
*/
work_pending:
mov x0, sp // 'regs'
mov x1, x19
bl do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on // enabled while in userspace
#endif
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
SYM_CODE_END(ret_to_user)

View file

@ -88,18 +88,3 @@ void __init init_IRQ(void)
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
}
/*
* Stubs to make nmi_enter/exit() code callable from ASM
*/
asmlinkage void notrace asm_nmi_enter(void)
{
nmi_enter();
}
NOKPROBE_SYMBOL(asm_nmi_enter);
asmlinkage void notrace asm_nmi_exit(void)
{
nmi_exit();
}
NOKPROBE_SYMBOL(asm_nmi_exit);

View file

@ -127,7 +127,7 @@ static void *image_load(struct kimage *image,
kernel_segment->mem, kbuf.bufsz,
kernel_segment->memsz);
return 0;
return NULL;
}
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG

View file

@ -189,7 +189,8 @@ long get_mte_ctrl(struct task_struct *task)
switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE:
return PR_MTE_TCF_NONE;
ret |= PR_MTE_TCF_NONE;
break;
case SCTLR_EL1_TCF0_SYNC:
ret |= PR_MTE_TCF_SYNC;
break;
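This hunk is the fix for the PR_GET_TAGGED_ADDR_CTRL commit listed above: with TCF0=NONE, the early return used to drop the remaining fields of the prctl() result. A hedged userspace sketch of the query (PR_GET_TAGGED_ADDR_CTRL needs recent kernel headers, so a fallback define is included; error handling kept minimal):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif

int main(void)
{
	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

	if (ctrl < 0) {
		perror("prctl(PR_GET_TAGGED_ADDR_CTRL)");
		return 1;
	}
	/* The tag-check-fault mode shares this value with the other tagged
	 * address control fields, which is why get_mte_ctrl() must OR in
	 * PR_MTE_TCF_NONE rather than return it alone. */
	printf("tagged addr ctrl: 0x%lx\n", ctrl);
	return 0;
}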

View file

@ -72,13 +72,13 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
static void __cpu_do_idle(void)
static void noinstr __cpu_do_idle(void)
{
dsb(sy);
wfi();
}
static void __cpu_do_idle_irqprio(void)
static void noinstr __cpu_do_idle_irqprio(void)
{
unsigned long pmr;
unsigned long daif_bits;
@ -108,7 +108,7 @@ static void __cpu_do_idle_irqprio(void)
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
void cpu_do_idle(void)
void noinstr cpu_do_idle(void)
{
if (system_uses_irq_prio_masking())
__cpu_do_idle_irqprio();
@ -119,7 +119,7 @@ void cpu_do_idle(void)
/*
* This is our default idle handler.
*/
void arch_cpu_idle(void)
void noinstr arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
@ -510,14 +510,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
bool prev32, next32;
u64 val;
if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
return;
prev32 = is_compat_thread(task_thread_info(prev));
next32 = is_compat_thread(task_thread_info(next));
if (prev32 == next32)
if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
return;
val = read_sysreg(cntkctl_el1);

View file

@ -119,6 +119,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }

View file

@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)
static void cpu_psci_cpu_die(unsigned int cpu)
{
int ret;
/*
* There are no known implementations of PSCI actually using the
* power state field, pass a sensible default for now.
@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
ret = psci_ops.cpu_off(state);
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
psci_ops.cpu_off(state);
}
static int cpu_psci_cpu_kill(unsigned int cpu)

View file

@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
@ -314,7 +315,7 @@ static void __kprobes notrace __sdei_pstate_entry(void)
set_pstate_pan(0);
}
asmlinkage __kprobes notrace unsigned long
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;
@ -325,11 +326,11 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
*/
__sdei_pstate_entry();
nmi_enter();
arm64_enter_nmi(regs);
ret = _sdei_handler(regs, arg);
nmi_exit();
arm64_exit_nmi(regs);
return ret;
}

View file

@ -413,6 +413,7 @@ void cpu_die_early(void)
/* Mark this CPU absent */
set_cpu_present(cpu, 0);
rcu_report_dead(cpu);
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
update_cpu_boot_status(CPU_KILL_ME);

View file

@ -121,7 +121,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
if (flags & _TIF_MTE_ASYNC_FAULT) {
/*

View file

@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
@ -754,8 +755,10 @@ const char *esr_get_class_string(u32 esr)
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
arm64_enter_nmi(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
@ -787,7 +790,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
asmlinkage void handle_bad_stack(struct pt_regs *regs)
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@ -795,6 +798,8 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
unsigned int esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@ -866,24 +871,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
nmi_enter();
arm64_enter_nmi(regs);
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
nmi_exit();
arm64_exit_nmi(regs);
}
asmlinkage void enter_from_user_mode(void)
{
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);
/* GENERIC_BUG traps */
int is_valid_bugaddr(unsigned long addr)

View file

@ -802,25 +802,6 @@ void __init hook_debug_fault_code(int nr,
*/
static void debug_exception_enter(struct pt_regs *regs)
{
/*
* Tell lockdep we disabled irqs in entry.S. Do nothing if they were
* already disabled to preserve the last enabled/disabled addresses.
*/
if (interrupts_enabled(regs))
trace_hardirqs_off();
if (user_mode(regs)) {
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
} else {
/*
* We might have interrupted pretty much anything. In
* fact, if we're a debug exception, we can even interrupt
* NMI processing. We don't want this code makes in_nmi()
* to return true, but we need to notify RCU.
*/
rcu_nmi_enter();
}
preempt_disable();
/* This code is a bit fragile. Test it. */
@ -831,12 +812,6 @@ NOKPROBE_SYMBOL(debug_exception_enter);
static void debug_exception_exit(struct pt_regs *regs)
{
preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
if (interrupts_enabled(regs))
trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);

View file

@ -1442,11 +1442,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}
static bool inside_linear_region(u64 start, u64 size)
{
/*
* Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
* accommodating both its ends but excluding PAGE_END. Max physical
* range which can be mapped inside this linear mapping range, must
* also be derived from its end points.
*/
return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
(start + size - 1) <= __pa(PAGE_END - 1);
}
int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
int ret, flags = 0;
if (!inside_linear_region(start, size)) {
pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
return -EINVAL;
}
if (rodata_full || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
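As a quick illustration of the bounds check added above (toy constants standing in for __pa(_PAGE_OFFSET(vabits_actual)) and __pa(PAGE_END - 1), which depend on vabits_actual): a hot-added range must start at or after the base of the linear map and its last byte must not go past the end.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy values, not the real linear map bounds. */
#define LINEAR_START 0x40000000ULL
#define LINEAR_LAST  0x7fffffffULL

static bool inside_linear_region(uint64_t start, uint64_t size)
{
	return start >= LINEAR_START && (start + size - 1) <= LINEAR_LAST;
}

int main(void)
{
	printf("%d\n", inside_linear_region(0x40000000ULL, 0x40000000ULL)); /* 1: exactly fills the region */
	printf("%d\n", inside_linear_region(0x7fffffffULL, 2));             /* 0: last byte falls past the end */
	return 0;
}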

View file

@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(iort_fwnode_lock);
* iort_set_fwnode() - Create iort_fwnode and use it to register
* iommu data in the iort_fwnode_list
*
* @node: IORT table node associated with the IOMMU
* @iort_node: IORT table node associated with the IOMMU
* @fwnode: fwnode associated with the IORT node
*
* Returns: 0 on success
@ -673,7 +673,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 id,
/**
* iort_get_device_domain() - Find MSI domain related to a device
* @dev: The device.
* @req_id: Requester ID for the device.
* @id: Requester ID for the device.
* @bus_token: irq domain bus token.
*
* Returns: the MSI domain for this device, NULL otherwise
*/
@ -1136,7 +1137,7 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*
* @dev: device to configure
* @dma_addr: device DMA address result pointer
* @size: DMA range size result pointer
* @dma_size: DMA range size result pointer
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
@ -1526,6 +1527,7 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
/**
* iort_add_platform_device() - Allocate a platform device for IORT node
* @node: Pointer to device ACPI IORT node
* @ops: Pointer to IORT device config struct
*
* Returns: 0 on success, <0 failure
*/

View file

@ -4077,7 +4077,6 @@ void rcu_cpu_starting(unsigned int cpu)
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* The outgoing function has no further need of RCU, so remove it from
* the rcu_node tree's ->qsmaskinitnext bit masks.
@ -4117,6 +4116,7 @@ void rcu_report_dead(unsigned int cpu)
rdp->cpu_started = false;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* The outgoing CPU has just passed through the dying-idle state, and we
* are being invoked from the CPU that was IPIed to continue the offline