Merge tag 'powerpc-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Optimise radix KVM guest entry/exit by 2x on Power9/Power10.

 - Allow firmware to tell us whether to disable the entry and uaccess
   flushes on Power10 or later CPUs.

 - Add BPF_PROBE_MEM support for 32 and 64-bit BPF jits.

 - Several fixes and improvements to our hard lockup watchdog.

 - Activate HAVE_DYNAMIC_FTRACE_WITH_REGS on 32-bit.

 - Allow building the 64-bit Book3S kernel without hash MMU support, ie.
   Radix only.

 - Add KUAP (SMAP) support for 40x, 44x, 8xx, Book3E (64-bit).

 - Add new encodings for perf_mem_data_src.mem_hops field, and use them
   on Power10.

 - A series of small performance improvements to 64-bit interrupt entry.

 - Several commits fixing issues when building with the clang integrated
   assembler.

 - Many other small features and fixes.

Thanks to Alan Modra, Alexey Kardashevskiy, Ammar Faizi, Anders Roxell,
Arnd Bergmann, Athira Rajeev, Cédric Le Goater, Christophe JAILLET,
Christophe Leroy, Christoph Hellwig, Daniel Axtens, David Yang, Erhard
Furtner, Fabiano Rosas, Greg Kroah-Hartman, Guo Ren, Hari Bathini, Jason
Wang, Joel Stanley, Julia Lawall, Kajol Jain, Kees Cook, Laurent Dufour,
Madhavan Srinivasan, Mark Brown, Minghao Chi, Nageswara R Sastry, Naresh
Kamboju, Nathan Chancellor, Nathan Lynch, Nicholas Piggin, Nick Child,
Oliver O'Halloran, Peiwei Hu, Randy Dunlap, Ravi Bangoria, Rob Herring,
Russell Currey, Sachin Sant, Sean Christopherson, Segher Boessenkool,
Thadeu Lima de Souza Cascardo, Tyrel Datwyler, Xiang wangx, and Yang
Guang.

* tag 'powerpc-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (240 commits)
  powerpc/xmon: Dump XIVE information for online-only processors.
  powerpc/opal: use default_groups in kobj_type
  powerpc/cacheinfo: use default_groups in kobj_type
  powerpc/sched: Remove unused TASK_SIZE_OF
  powerpc/xive: Add missing null check after calling kmalloc
  powerpc/floppy: Remove usage of the deprecated "pci-dma-compat.h" API
  selftests/powerpc: Add a test of sigreturning to an unaligned address
  powerpc/64s: Use EMIT_WARN_ENTRY for SRR debug warnings
  powerpc/64s: Mask NIP before checking against SRR0
  powerpc/perf: Fix spelling of "its"
  powerpc/32: Fix boot failure with GCC latent entropy plugin
  powerpc/code-patching: Replace patch_instruction() by ppc_inst_write() in selftests
  powerpc/code-patching: Move code patching selftests in its own file
  powerpc/code-patching: Move instr_is_branch_{i/b}form() in code-patching.h
  powerpc/code-patching: Move patch_exception() outside code-patching.c
  powerpc/code-patching: Use test_trampoline for prefixed patch test
  powerpc/code-patching: Fix patch_branch() return on out-of-range failure
  powerpc/code-patching: Reorganise do_patch_instruction() to ease error handling
  powerpc/code-patching: Fix unmap_patch_area() error handling
  powerpc/code-patching: Fix error handling in do_patch_instruction()
  ...
Linus Torvalds  2022-01-14 15:17:26 +01:00
Parents: 3fb561b1e0 f1aa0e47c2
Commit: 29ec39fcf1
370 changed files: 5636 additions and 3449 deletions


@ -3393,7 +3393,7 @@
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
nosmep [X86,PPC]
nosmep [X86,PPC64s]
Disable SMEP (Supervisor Mode Execution Prevention)
even if it is supported by processor.
@ -4166,6 +4166,14 @@
Override pmtimer IOPort with a hex value.
e.g. pmtmr=0x508
pmu_override= [PPC] Override the PMU.
This option takes over the PMU facility, so it is no
longer usable by perf. Setting this option starts the
PMU counters by setting MMCR0 to 0 (the FC bit is
cleared). If a number is given, then MMCR1 is set to
that number, otherwise (e.g., 'pmu_override=on'), MMCR1
remains 0.
pm_debug_messages [SUSPEND,KNL]
Enable suspend/resume debug messages during boot up.
@ -6494,6 +6502,12 @@
controller on both pseries and powernv
platforms. Only useful on POWER9 and above.
xive.store-eoi=off [PPC]
By default on POWER10 and above, the kernel will use
stores for EOI handling when the XIVE interrupt mode
is active. This option allows the XIVE driver to use
loads instead, as on POWER9.
xhci-hcd.quirks [USB,KNL]
A hex value specifying bitmask with supplemental xhci
host controller quirks. Meaning of each bit can be


@ -129,7 +129,7 @@ config PPC
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PHYS_TO_DMA
@ -165,6 +165,7 @@ config PPC
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select CPUMASK_OFFSTACK if NR_CPUS >= 8192
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select DMA_OPS_BYPASS if PPC64
select DMA_OPS if PPC64
@ -205,7 +206,7 @@ config PPC
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
@ -229,7 +230,7 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS && PPC64
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_OPTPROBES
@ -845,7 +846,7 @@ config FORCE_MAX_ZONEORDER
config PPC_SUBPAGE_PROT
bool "Support setting protections for 4k subpages (subpage_prot syscall)"
default n
depends on PPC_BOOK3S_64 && PPC_64K_PAGES
depends on PPC_64S_HASH_MMU && PPC_64K_PAGES
help
This option adds support for system call to allow user programs
to set access permissions (read/write, readonly, or no access)
@ -943,6 +944,7 @@ config PPC_MEM_KEYS
prompt "PowerPC Memory Protection Keys"
def_bool y
depends on PPC_BOOK3S_64
depends on PPC_64S_HASH_MMU
select ARCH_USES_HIGH_VMA_FLAGS
select ARCH_HAS_PKEYS
help


@ -245,7 +245,9 @@ cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
# mnemonic and failing that it will allow any valid mnemonic that GAS knows
# about. GCC will pass -many to GAS when assembling, clang does not.
cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many
# LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
# but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
cpu-as-$(CONFIG_PPC_BOOK3S_64) += $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
@ -445,10 +447,11 @@ PHONY += checkbin
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
checkbin:
@if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
&& $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
@if test "x${CONFIG_LD_IS_LLD}" != "xy" -a \
"x$(call ld-ifversion, -le, 22400, y)" = "xy" ; then \
echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
echo 'in some circumstances.' ; \
echo '*** binutils 2.23 do not define the TOC symbol ' ; \
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi


@ -28,7 +28,7 @@ p_etext: .8byte _etext
p_bss_start: .8byte __bss_start
p_end: .8byte _end
p_toc: .8byte __toc_start + 0x8000 - p_base
p_toc: .8byte .TOC. - p_base
p_dyn: .8byte __dynamic_start - p_base
p_rela: .8byte __rela_dyn_start - p_base
p_prom: .8byte 0
@ -226,16 +226,19 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
#ifdef __powerpc64__
#define PROM_FRAME_SIZE 512
#define SAVE_GPR(n, base) std n,8*(n)(base)
#define REST_GPR(n, base) ld n,8*(n)(base)
#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
.macro OP_REGS op, width, start, end, base, offset
.Lreg=\start
.rept (\end - \start + 1)
\op .Lreg,\offset+\width*.Lreg(\base)
.Lreg=.Lreg+1
.endr
.endm
#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, 0
#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, 0
#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
#define REST_GPR(n, base) REST_GPRS(n, n, base)
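/*
 * Illustration only (hypothetical invocation, not part of this file): with
 * the OP_REGS helper above, SAVE_GPRS(14, 16, r1) expands to
 *	std	14,112(r1)
 *	std	15,120(r1)
 *	std	16,128(r1)
 * i.e. one "std reg,8*reg(base)" per register in the range, which is why the
 * old fixed-size SAVE_8GPRS/SAVE_10GPRS pairs below collapse into a single
 * SAVE_GPRS(13, 31, r1).
 */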
/* prom handles the jump into and return from firmware. The prom args pointer
is loaded in r3. */
@ -246,9 +249,7 @@ prom:
stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
SAVE_GPR(2, r1)
SAVE_GPR(13, r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
SAVE_GPRS(13, 31, r1)
mfcr r10
std r10,8*32(r1)
mfmsr r10
@ -283,9 +284,7 @@ prom:
/* Restore other registers */
REST_GPR(2, r1)
REST_GPR(13, r1)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
REST_GPRS(13, 31, r1)
ld r10,8*32(r1)
mtcr r10


@ -25,14 +25,6 @@
status = "disabled";
};
spi@f00 {
msp430@0 {
compatible = "spidev";
spi-max-frequency = <32000>;
reg = <0>;
};
};
psc@2000 { // PSC1
status = "disabled";
};


@ -34,12 +34,6 @@
#address-cells = <1>;
#size-cells = <0>;
cell-index = <0>;
spidev@0 {
compatible = "spidev";
spi-max-frequency = <250000>;
reg = <0>;
};
};
psc@2200 { // PSC2


@ -36,12 +36,9 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
. = ALIGN(256);
.got :
.got : ALIGN(256)
{
__toc_start = .;
*(.got)
*(.toc)
*(.got .toc)
}
#endif


@ -15,6 +15,8 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_PPC64=y
CONFIG_POWER9_CPU=y
# CONFIG_PPC_64S_HASH_MMU is not set
# CONFIG_PPC_KUEP is not set
# CONFIG_PPC_KUAP is not set
CONFIG_CPU_LITTLE_ENDIAN=y
@ -27,7 +29,6 @@ CONFIG_PPC_MICROWATT=y
CONFIG_CPU_FREQ=y
CONFIG_HZ_100=y
CONFIG_PPC_4K_PAGES=y
# CONFIG_PPC_MEM_KEYS is not set
# CONFIG_SECCOMP is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_COREDUMP is not set


@ -26,7 +26,6 @@ CONFIG_PPC64=y
CONFIG_NR_CPUS=2048
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PPC_SVM=y


@ -38,7 +38,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PAPR_SCM=m


@ -38,15 +38,11 @@
#define INITIALIZE \
PPC_STLU r1,-INT_FRAME_SIZE(r1); \
SAVE_8GPRS(14, r1); /* push registers onto stack */ \
SAVE_4GPRS(22, r1); \
SAVE_GPR(26, r1)
SAVE_GPRS(14, 26, r1) /* push registers onto stack */
#define FINALIZE \
REST_8GPRS(14, r1); /* pop registers from stack */ \
REST_4GPRS(22, r1); \
REST_GPR(26, r1); \
addi r1,r1,INT_FRAME_SIZE;
REST_GPRS(14, 26, r1); /* pop registers from stack */ \
addi r1,r1,INT_FRAME_SIZE
#ifdef __BIG_ENDIAN__
#define LOAD_DATA(reg, off) \


@ -125,8 +125,7 @@
_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
SAVE_GPRS(14, 31, r1)
/* Load up A - E */
lwz RA(0),0(r3) /* A */
@ -184,7 +183,6 @@ _GLOBAL(powerpc_sha_transform)
stw RD(0),12(r3)
stw RE(0),16(r3)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
REST_GPRS(14, 31, r1)
addi r1,r1,INT_FRAME_SIZE
blr


@ -141,11 +141,6 @@ static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
bool preserve_nv) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void kvmhv_save_host_pmu(void);
void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);


@ -37,62 +37,62 @@ static __inline__ void arch_atomic_set(atomic_t *v, int i)
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
#asm_op " %0,%2,%0\n" \
#asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
: "cc"); \
: "r"#sign (a), "r" (&v->counter) \
: "cc", ##__VA_ARGS__); \
} \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...) \
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
#asm_op " %0,%2,%0\n" \
#asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
: "cc"); \
: "r"#sign (a), "r" (&v->counter) \
: "cc", ##__VA_ARGS__); \
\
return t; \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...) \
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
#asm_op "%I3" suffix " %1,%0,%3\n" \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
: "r" (a), "r" (&v->counter) \
: "cc"); \
: "r"#sign (a), "r" (&v->counter) \
: "cc", ##__VA_ARGS__); \
\
return res; \
}
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)
#define ATOMIC_OPS(op, asm_op, suffix, sign, ...) \
ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__) \
ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")
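/*
 * Illustration only (not part of the file): with the new suffix/sign/clobber
 * arguments the template becomes e.g. "add%I2c %0,%0,%2" under an "rI"
 * constraint, so GCC emits "addic rT,rT,imm" when the operand is a signed
 * 16-bit constant and "addc rT,rT,rA" when it is a register.  The carrying
 * forms are used rather than addi because addi treats register 0 as the
 * constant 0; since they set CA, "xer" is added to the clobber list.
 */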
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
@ -101,13 +101,13 @@ ATOMIC_OPS(sub, subf)
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op)
#define ATOMIC_OPS(op, asm_op, suffix, sign) \
ATOMIC_OP(op, asm_op, suffix, sign) \
ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
@ -118,71 +118,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
addic %0,%0,1\n"
" stwcx. %0,0,%2 \n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
}
#define arch_atomic_inc arch_atomic_inc
static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
return t;
}
static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
addic %0,%0,-1\n"
" stwcx. %0,0,%2\n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
}
#define arch_atomic_dec arch_atomic_dec
static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
return t;
}
#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
#define arch_atomic_cmpxchg(v, o, n) \
(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
@ -241,50 +176,20 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
add%I2c %0,%0,%2 \n"
" stwcx. %0,0,%1 \n\
bne- 1b \n"
PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
" sub%I2c %0,%0,%2 \n\
2:"
: "=&r" (t)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
: "r" (&v->counter), "rI" (a), "r" (u)
: "cc", "memory", "xer");
return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
cmpwi 0,%0,0\n\
beq- 2f\n\
addic %1,%0,1\n"
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (t1), "=&r" (t2)
: "r" (&v->counter)
: "cc", "xer", "memory");
return t1;
}
#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if


@ -71,19 +71,61 @@ static inline void fn(unsigned long mask, \
__asm__ __volatile__ ( \
prefix \
"1:" PPC_LLARX "%0,0,%3,0\n" \
stringify_in_c(op) "%0,%0,%2\n" \
#op "%I2 %0,%0,%2\n" \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "rK" (mask), "r" (p) \
: "cc", "memory"); \
}
DEFINE_BITOP(set_bits, or, "")
DEFINE_BITOP(clear_bits, andc, "")
DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")
static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
{
if (!x)
return false;
if (x & 1)
x = ~x; // make the mask non-wrapping
x += x & -x; // adding the low set bit results in at most one bit set
return !(x & (x - 1));
}
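/*
 * Worked examples (illustration only, assuming 32-bit unsigned long): the
 * check accepts any value that is a single contiguous run of 1s, possibly
 * wrapping past bit 0, which is exactly what rlwinm's MB/ME mask encoding
 * can express.
 *
 *	0x0ff00000: + low bit (0x00100000) = 0x10000000, a power of two -> valid
 *	0xff000003: bit 0 set, invert to 0x00fffffc, + 4 = 0x01000000   -> valid
 *	0x00f000f0: + 0x10 = 0x00f00100, still more than one bit set    -> invalid
 */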
#define DEFINE_CLROP(fn, prefix) \
static inline void fn(unsigned long mask, volatile unsigned long *_p) \
{ \
unsigned long old; \
unsigned long *p = (unsigned long *)_p; \
\
if (IS_ENABLED(CONFIG_PPC32) && \
__builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
asm volatile ( \
prefix \
"1:" "lwarx %0,0,%3\n" \
"rlwinm %0,%0,0,%2\n" \
"stwcx. %0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
: "n" (~mask), "r" (p) \
: "cc", "memory"); \
} else { \
asm volatile ( \
prefix \
"1:" PPC_LLARX "%0,0,%3,0\n" \
"andc %0,%0,%2\n" \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
} \
}
DEFINE_CLROP(clear_bits, "")
DEFINE_CLROP(clear_bits_unlock, PPC_RELEASE_BARRIER)
static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
@ -116,12 +158,12 @@ static inline unsigned long fn( \
__asm__ __volatile__ ( \
prefix \
"1:" PPC_LLARX "%0,0,%3,%4\n" \
stringify_in_c(op) "%1,%0,%2\n" \
#op "%I2 %1,%0,%2\n" \
PPC_STLCX "%1,0,%3\n" \
"bne- 1b\n" \
postfix \
: "=&r" (old), "=&r" (t) \
: "r" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
: "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
: "cc", "memory"); \
return (old & mask); \
}
@ -130,11 +172,42 @@ DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
PPC_ACQUIRE_BARRIER, 1)
DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
{
unsigned long old, t;
unsigned long *p = (unsigned long *)_p;
if (IS_ENABLED(CONFIG_PPC32) &&
__builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
asm volatile (
PPC_ATOMIC_ENTRY_BARRIER
"1:" "lwarx %0,0,%3\n"
"rlwinm %1,%0,0,%2\n"
"stwcx. %1,0,%3\n"
"bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (old), "=&r" (t)
: "n" (~mask), "r" (p)
: "cc", "memory");
} else {
asm volatile (
PPC_ATOMIC_ENTRY_BARRIER
"1:" PPC_LLARX "%0,0,%3,0\n"
"andc %1,%0,%2\n"
PPC_STLCX "%1,0,%3\n"
"bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (old), "=&r" (t)
: "r" (mask), "r" (p)
: "cc", "memory");
}
return (old & mask);
}
static inline int arch_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{


@ -12,50 +12,10 @@
#include <linux/jump_label.h>
extern struct static_key_false disable_kuap_key;
extern struct static_key_false disable_kuep_key;
static __always_inline bool kuap_is_disabled(void)
{
return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
}
static __always_inline bool kuep_is_disabled(void)
{
return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
}
static inline void kuep_lock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) | SR_NX);
/*
* This isync() shouldn't be necessary as the kernel is not excepted to
* run any instruction in userspace soon after the update of segments,
* but hash based cores (at least G3) seem to exhibit a random
* behaviour when the 'isync' is not there. 603 cores don't have this
* behaviour so don't do the 'isync' as it saves several CPU cycles.
*/
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
}
static inline void kuep_unlock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) & ~SR_NX);
/*
* This isync() shouldn't be necessary as a 'rfi' will soon be executed
* to return to userspace, but hash based cores (at least G3) seem to
* exhibit a random behaviour when the 'isync' is not there. 603 cores
* don't have this behaviour so don't do the 'isync' as it saves several
* CPU cycles.
*/
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
return !IS_ENABLED(CONFIG_PPC_KUEP);
}
#ifdef CONFIG_PPC_KUAP
@ -65,6 +25,11 @@ static inline void kuep_unlock(void)
#define KUAP_NONE (~0UL)
#define KUAP_ALL (~1UL)
static __always_inline bool kuap_is_disabled(void)
{
return static_branch_unlikely(&disable_kuap_key);
}
static inline void kuap_lock_one(unsigned long addr)
{
mtsr(mfsr(addr) | SR_KS, addr);
@ -92,7 +57,7 @@ static inline void kuap_unlock_all(void)
void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);
static inline void kuap_lock(unsigned long addr, bool ool)
static inline void kuap_lock_addr(unsigned long addr, bool ool)
{
if (likely(addr != KUAP_ALL))
kuap_lock_one(addr);
@ -112,33 +77,31 @@ static inline void kuap_unlock(unsigned long addr, bool ool)
kuap_unlock_all_ool();
}
static inline void kuap_save_and_lock(struct pt_regs *regs)
static inline void __kuap_lock(void)
{
}
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
if (kuap_is_disabled())
return;
regs->kuap = kuap;
if (unlikely(kuap == KUAP_NONE))
return;
current->thread.kuap = KUAP_NONE;
kuap_lock(kuap, false);
kuap_lock_addr(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
{
}
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (kuap_is_disabled())
return;
if (unlikely(kuap != KUAP_NONE)) {
current->thread.kuap = KUAP_NONE;
kuap_lock(kuap, false);
kuap_lock_addr(kuap, false);
}
if (likely(regs->kuap == KUAP_NONE))
@ -149,29 +112,18 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
kuap_unlock(regs->kuap, false);
}
static inline unsigned long kuap_get_and_assert_locked(void)
static inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
if (kuap_is_disabled())
return KUAP_NONE;
WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
static inline void kuap_assert_locked(void)
static __always_inline void __allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
kuap_get_and_assert_locked();
}
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
if (kuap_is_disabled())
return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (!(dir & KUAP_WRITE))
@ -181,42 +133,33 @@ static __always_inline void allow_user_access(void __user *to, const void __user
kuap_unlock_one((__force u32)to);
}
static __always_inline void prevent_user_access(unsigned long dir)
static __always_inline void __prevent_user_access(unsigned long dir)
{
u32 kuap = current->thread.kuap;
if (kuap_is_disabled())
return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (!(dir & KUAP_WRITE))
return;
current->thread.kuap = KUAP_NONE;
kuap_lock(kuap, true);
kuap_lock_addr(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
if (kuap_is_disabled())
return KUAP_NONE;
if (flags != KUAP_NONE) {
current->thread.kuap = KUAP_NONE;
kuap_lock(flags, true);
kuap_lock_addr(flags, true);
}
return flags;
}
static inline void restore_user_access(unsigned long flags)
static inline void __restore_user_access(unsigned long flags)
{
if (kuap_is_disabled())
return;
if (flags != KUAP_NONE) {
current->thread.kuap = flags;
kuap_unlock(flags, true);
@ -224,13 +167,10 @@ static inline void restore_user_access(unsigned long flags)
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
unsigned long kuap = regs->kuap;
if (kuap_is_disabled())
return false;
if (!is_write || kuap == KUAP_ALL)
return false;
if (kuap == KUAP_NONE)


@ -64,7 +64,82 @@ struct ppc_bat {
#define SR_KP 0x20000000 /* User key */
#define SR_KS 0x40000000 /* Supervisor key */
#ifndef __ASSEMBLY__
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
.macro uus_addi sr reg1 reg2 imm
.if NUM_USER_SEGMENTS > \sr
addi \reg1,\reg2,\imm
.endif
.endm
.macro uus_mtsr sr reg1
.if NUM_USER_SEGMENTS > \sr
mtsr \sr, \reg1
.endif
.endm
/*
* This isync() shouldn't be necessary as the kernel is not excepted to run
* any instruction in userspace soon after the update of segments and 'rfi'
* instruction is used to return to userspace, but hash based cores
* (at least G3) seem to exhibit a random behaviour when the 'isync' is not
* there. 603 cores don't have this behaviour so don't do the 'isync' as it
* saves several CPU cycles.
*/
.macro uus_isync
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
isync
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
.endm
.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
uus_addi 1, \tmp2, \tmp1, 0x111
uus_addi 2, \tmp3, \tmp1, 0x222
uus_addi 3, \tmp4, \tmp1, 0x333
uus_mtsr 0, \tmp1
uus_mtsr 1, \tmp2
uus_mtsr 2, \tmp3
uus_mtsr 3, \tmp4
uus_addi 4, \tmp1, \tmp1, 0x444
uus_addi 5, \tmp2, \tmp2, 0x444
uus_addi 6, \tmp3, \tmp3, 0x444
uus_addi 7, \tmp4, \tmp4, 0x444
uus_mtsr 4, \tmp1
uus_mtsr 5, \tmp2
uus_mtsr 6, \tmp3
uus_mtsr 7, \tmp4
uus_addi 8, \tmp1, \tmp1, 0x444
uus_addi 9, \tmp2, \tmp2, 0x444
uus_addi 10, \tmp3, \tmp3, 0x444
uus_addi 11, \tmp4, \tmp4, 0x444
uus_mtsr 8, \tmp1
uus_mtsr 9, \tmp2
uus_mtsr 10, \tmp3
uus_mtsr 11, \tmp4
uus_addi 12, \tmp1, \tmp1, 0x444
uus_addi 13, \tmp2, \tmp2, 0x444
uus_addi 14, \tmp3, \tmp3, 0x444
uus_addi 15, \tmp4, \tmp4, 0x444
uus_mtsr 12, \tmp1
uus_mtsr 13, \tmp2
uus_mtsr 14, \tmp3
uus_mtsr 15, \tmp4
uus_isync
.endm
#else
/*
* This macro defines the mapping from contexts to VSIDs (virtual
@ -100,9 +175,14 @@ struct hash_pte {
typedef struct {
unsigned long id;
unsigned long sr0;
void __user *vdso;
} mm_context_t;
#ifdef CONFIG_PPC_KUEP
#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
#endif
void update_bats(void);
static inline void cleanup_cpu_mmu_context(void) { }


@ -99,10 +99,6 @@
* Defines the address of the vmemap area, in its own region on
* hash table CPUs.
*/
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8


@ -229,6 +229,11 @@ static inline u64 current_thread_iamr(void)
#ifdef CONFIG_PPC_KUAP
static __always_inline bool kuap_is_disabled(void)
{
return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
}
static inline void kuap_user_restore(struct pt_regs *regs)
{
bool restore_amr = false, restore_iamr = false;
@ -268,40 +273,38 @@ static inline void kuap_user_restore(struct pt_regs *regs)
*/
}
static inline void kuap_kernel_restore(struct pt_regs *regs,
unsigned long amr)
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
if (unlikely(regs->amr != amr)) {
isync();
mtspr(SPRN_AMR, regs->amr);
/*
* No isync required here because we are about to rfi
* back to previous context before any user accesses
* would be made, which is a CSI.
*/
}
}
if (likely(regs->amr == amr))
return;
isync();
mtspr(SPRN_AMR, regs->amr);
/*
* No isync required here because we are about to rfi
* back to previous context before any user accesses
* would be made, which is a CSI.
*
* No need to restore IAMR when returning to kernel space.
*/
}
static inline unsigned long kuap_get_and_assert_locked(void)
static inline unsigned long __kuap_get_and_assert_locked(void)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
unsigned long amr = mfspr(SPRN_AMR);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
return amr;
}
return 0;
unsigned long amr = mfspr(SPRN_AMR);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
return amr;
}
static inline void kuap_assert_locked(void)
/* Do nothing, book3s/64 does that in ASM */
static inline void __kuap_lock(void)
{
}
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}
/*
@ -339,11 +342,8 @@ static inline void set_kuap(unsigned long value)
isync();
}
static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
bool is_write)
static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return false;
/*
* For radix this will be a storage protection fault (DSISR_PROTFAULT).
* For hash this will be a key fault (DSISR_KEYFAULT)


@ -523,8 +523,14 @@ void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
void preload_new_slb_context(unsigned long start, unsigned long sp);
#ifdef CONFIG_PPC_64S_HASH_MMU
void slb_set_size(u16 size);
#else
static inline void slb_set_size(u16 size) { }
#endif
#endif /* __ASSEMBLY__ */
/*


@ -4,6 +4,12 @@
#include <asm/page.h>
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#ifndef __ASSEMBLY__
/*
* Page size definition
@ -62,6 +68,9 @@ extern struct patb_entry *partition_tb;
#define PRTS_MASK 0x1f /* process table size field */
#define PRTB_MASK 0x0ffffffffffff000UL
/* Number of supported LPID bits */
extern unsigned int mmu_lpid_bits;
/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;
@ -76,10 +85,8 @@ extern unsigned long __ro_after_init radix_mem_block_size;
#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES (1ul << mmu_pid_bits)
/*
* Power9 currently only support 64K partition table size.
*/
#define PATB_SIZE_SHIFT 16
#define PATB_SIZE_SHIFT (mmu_lpid_bits + 4)
#define PATB_ENTRIES (1ul << mmu_lpid_bits)
typedef unsigned long mm_context_id_t;
struct spinlock;
@ -98,7 +105,9 @@ typedef struct {
* from EA and new context ids to build the new VAs.
*/
mm_context_id_t id;
#ifdef CONFIG_PPC_64S_HASH_MMU
mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
#endif
};
/* Number of bits in the mm_cpumask */
@ -110,7 +119,9 @@ typedef struct {
/* Number of user space windows opened in process mm_context */
atomic_t vas_windows;
#ifdef CONFIG_PPC_64S_HASH_MMU
struct hash_mm_context *hash_context;
#endif
void __user *vdso;
/*
@ -133,6 +144,7 @@ typedef struct {
#endif
} mm_context_t;
#ifdef CONFIG_PPC_64S_HASH_MMU
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
return ctx->hash_context->user_psize;
@ -193,8 +205,15 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
#else /* CONFIG_PPC_64S_HASH_MMU */
#ifdef CONFIG_PPC_64K_PAGES
#define mmu_virtual_psize MMU_PAGE_64K
#else
#define mmu_virtual_psize MMU_PAGE_4K
#endif
#endif
extern int mmu_vmemmap_psize;
/* MMU initialization */
void mmu_early_init_devtree(void);
@ -233,12 +252,13 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* know which translations we will pick. Hence go with hash
* restrictions.
*/
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
if (!early_radix_enabled())
hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
void __init radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif
@ -255,6 +275,7 @@ static inline void radix_init_pseries(void) { }
void cleanup_cpu_mmu_context(void);
#endif
#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@ -274,6 +295,7 @@ static inline unsigned long get_user_vsid(mm_context_t *ctx,
return get_vsid(context, ea, ssize);
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */


@ -112,8 +112,14 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr);
#else
static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
#endif
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */


@ -14,7 +14,6 @@ enum {
TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
};
#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
/*
@ -30,9 +29,6 @@ static inline void tlbiel_all(void)
else
hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
#else
static inline void tlbiel_all(void) { BUG(); }
#endif
static inline void tlbiel_all_lpid(bool radix)
{


@ -25,6 +25,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
@ -35,6 +36,9 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* waiting for the inevitable extra hash-table miss exception.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif
#endif /* __ASSEMBLY__ */
#endif


@ -23,12 +23,12 @@ extern void btext_unmap(void);
extern void btext_drawchar(char c);
extern void btext_drawstring(const char *str);
extern void btext_drawhex(unsigned long v);
extern void btext_drawtext(const char *c, unsigned int len);
void __init btext_drawhex(unsigned long v);
void __init btext_drawtext(const char *c, unsigned int len);
extern void btext_clearscreen(void);
extern void btext_flushscreen(void);
extern void btext_flushline(void);
void __init btext_clearscreen(void);
void __init btext_flushscreen(void);
void __init btext_flushline(void);
#endif /* __KERNEL__ */
#endif /* __PPC_BTEXT_H */


@ -24,20 +24,20 @@
bool is_offset_in_branch_range(long offset);
bool is_offset_in_cond_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const u32 *addr,
int create_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, struct ppc_inst instr);
int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
}
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
return patch_instruction((u32 *)patch_site_addr(site), instr);
}
@ -58,18 +58,26 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
static inline unsigned int branch_opcode(ppc_inst_t instr)
{
return ppc_inst_primary_opcode(instr) & 0x3F;
}
static inline int instr_is_branch_iform(ppc_inst_t instr)
{
return branch_opcode(instr) == 18;
}
static inline int instr_is_branch_bform(ppc_inst_t instr)
{
return branch_opcode(instr) == 16;
}
int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
extern unsigned int name; \
__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);
#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 (PPC_RAW_LIS(_R2, 0))


@ -1133,8 +1133,8 @@ enum cpm_clk {
CPM_CLK_DUMMY
};
extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
@ -1143,7 +1143,7 @@ extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
void cpm2_set_pin(int port, int pin, int flags);
void __init cpm2_set_pin(int port, int pin, int flags);
#endif /* __CPM2__ */
#endif /* __KERNEL__ */


@ -85,7 +85,7 @@ extern struct pnv_idle_states_t *pnv_idle_states;
extern int nr_pnv_idle_states;
unsigned long pnv_cpu_offline(unsigned int cpu);
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
static inline void report_invalid_psscr_val(u64 psscr_val, int err)
{
switch (err) {


@ -32,44 +32,11 @@ extern cpumask_t threads_core_mask;
#define threads_core_mask (*get_cpu_mask(0))
#endif
/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
* hit by the argument
*
* @threads: a cpumask of online threads
*
* This function returns a cpumask which will have one online cpu's
* bit set for each core that has at least one thread set in the argument.
*
* This can typically be used for things like IPI for tlb invalidations
* since those need to be done only once per core/TLB
*/
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
cpumask_t tmp, res;
int i, cpu;
cpumask_clear(&res);
for (i = 0; i < NR_CPUS; i += threads_per_core) {
cpumask_shift_left(&tmp, &threads_core_mask, i);
if (cpumask_intersects(threads, &tmp)) {
cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
if (cpu < nr_cpu_ids)
cpumask_set_cpu(cpu, &res);
}
}
return res;
}
static inline int cpu_nr_cores(void)
{
return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)
{
return cpu_thread_mask_to_cores(cpu_online_mask);
}
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);


@ -460,7 +460,7 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
}
void eeh_cache_debugfs_init(void);
void __init eeh_cache_debugfs_init(void);
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */


@ -149,6 +149,10 @@ exc_##label##_book3e:
addi r11,r13,PACA_EXTLB; \
TLB_MISS_RESTORE(r11)
#ifndef __ASSEMBLY__
extern unsigned int interrupt_base_book3e;
#endif
#define SET_IVOR(vector_number, vector_offset) \
LOAD_REG_ADDR(r3,interrupt_base_book3e);\
ori r3,r3,vector_offset@l; \


@ -137,10 +137,10 @@ struct fadump_ops {
};
/* Helper functions */
s32 fadump_setup_cpu_notes_buf(u32 num_cpus);
s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus);
void fadump_free_cpu_notes_buf(void);
u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
void fadump_update_elfcore_header(char *bufp);
u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
void __init fadump_update_elfcore_header(char *bufp);
bool is_fadump_boot_mem_contiguous(void);
bool is_fadump_reserved_mem_contiguous(void);


@ -80,8 +80,6 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_NATIVE_POSSIBLE = 0,
FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@ -91,9 +89,6 @@ enum {
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS |
#endif
0,
FW_FEATURE_ALWAYS =
@ -105,9 +100,6 @@ enum {
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS &
#endif
FW_FEATURE_POSSIBLE,


@ -134,17 +134,19 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
int dir;
doing_vdma = 0;
dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
dir = (mode == DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (bus_addr
&& (addr != prev_addr || size != prev_size || dir != prev_dir)) {
/* different from last time -- unmap prev */
pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
dma_unmap_single(&isa_bridge_pcidev->dev, bus_addr, prev_size,
prev_dir);
bus_addr = 0;
}
if (!bus_addr) /* need to map it */
bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
dir);
/* remember this one as prev */
prev_addr = addr;


@ -98,13 +98,9 @@ start_text:
. = sname##_len;
#define USE_FIXED_SECTION(sname) \
fs_label = start_##sname; \
fs_start = sname##_start; \
use_ftsec sname;
#define USE_TEXT_SECTION() \
fs_label = start_text; \
fs_start = text_start; \
.text
#define CLOSE_FIXED_SECTION(sname) \
@ -161,13 +157,15 @@ name:
* - ABS_ADDR is used to find the absolute address of any symbol, from within
* a fixed section.
*/
#define DEFINE_FIXED_SYMBOL(label) \
label##_absolute = (label - fs_label + fs_start)
// define label as being _in_ sname
#define DEFINE_FIXED_SYMBOL(label, sname) \
label##_absolute = (label - start_ ## sname + sname ## _start)
#define FIXED_SYMBOL_ABS_ADDR(label) \
(label##_absolute)
#define ABS_ADDR(label) (label - fs_label + fs_start)
// find label from _within_ sname
#define ABS_ADDR(label, sname) (label - start_ ## sname + sname ## _start)
#endif /* __ASSEMBLY__ */


@ -15,7 +15,7 @@
extern bool hugetlb_disabled;
void hugetlbpage_init_default(void);
void __init hugetlbpage_init_default(void);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);


@ -10,7 +10,6 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
#include <asm/cpu_has_feature.h>
#include <asm/inst.h>
#ifdef __KERNEL__
struct arch_hw_breakpoint {
@ -56,11 +55,11 @@ static inline int nr_wp_slots(void)
return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
}
bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
unsigned long ea, int type, int size,
struct arch_hw_breakpoint *info);
void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
int *type, int *size, unsigned long *ea);
#ifdef CONFIG_HAVE_HW_BREAKPOINT


@ -61,7 +61,7 @@
static inline void __hard_irq_enable(void)
{
if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(MSR_EE);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_EIE);
@ -73,7 +73,7 @@ static inline void __hard_irq_enable(void)
static inline void __hard_irq_disable(void)
{
if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(0);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_EID);
@ -85,7 +85,7 @@ static inline void __hard_irq_disable(void)
static inline void __hard_EE_RI_disable(void)
{
if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(0);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_NRI);
@ -97,7 +97,7 @@ static inline void __hard_EE_RI_disable(void)
static inline void __hard_RI_enable(void)
{
if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
return;
if (IS_ENABLED(CONFIG_PPC_8xx))
@ -224,6 +224,42 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
static inline void set_pmi_irq_pending(void)
{
/*
* Invoked from PMU callback functions to set PMI bit in the paca.
* This has to be called with irq's disabled (via hard_irq_disable()).
*/
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(mfmsr() & MSR_EE);
get_paca()->irq_happened |= PACA_IRQ_PMI;
}
static inline void clear_pmi_irq_pending(void)
{
/*
* Invoked from PMU callback functions to clear the pending PMI bit
* in the paca.
*/
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(mfmsr() & MSR_EE);
get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}
static inline bool pmi_irq_pending(void)
{
/*
* Invoked from PMU callback functions to check if there is a pending
* PMI bit in the paca.
*/
if (get_paca()->irq_happened & PACA_IRQ_PMI)
return true;
return false;
}
#ifdef CONFIG_PPC_BOOK3S
/*
* To support disabling and enabling of irq with PMI, set of
@ -306,18 +342,57 @@ static inline bool lazy_irq_pending_nocheck(void)
return __lazy_irq_pending(local_paca->irq_happened);
}
bool power_pmu_wants_prompt_pmi(void);
/*
* This is called by asynchronous interrupts to conditionally
* re-enable hard interrupts after having cleared the source
* of the interrupt. They are kept disabled if there is a different
* soft-masked interrupt pending that requires hard masking.
* This is called by asynchronous interrupts to check whether to
* conditionally re-enable hard interrupts after having cleared
* the source of the interrupt. They are kept disabled if there
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
static inline void may_hard_irq_enable(void)
static inline bool should_hard_irq_enable(void)
{
if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(mfmsr() & MSR_EE);
#endif
#ifdef CONFIG_PERF_EVENTS
/*
* If the PMU is not running, there is not much reason to enable
* MSR[EE] in irq handlers because any interrupts would just be
* soft-masked.
*
* TODO: Add test for 64e
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
return false;
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;
return true;
#else
return false;
#endif
}
/*
* Do the hard enabling, only call this if should_hard_irq_enable is true.
*/
static inline void do_hard_irq_enable(void)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
WARN_ON(mfmsr() & MSR_EE);
#endif
/*
* This allows PMI interrupts (and watchdog soft-NMIs) through.
* There is no other reason to enable this way.
*/
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
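/*
 * Sketch of the intended call sequence in an asynchronous interrupt handler
 * (assumed usage, derived from the comments above):
 *
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();
 *	... then clear and handle the interrupt source ...
 */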
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@ -398,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
static inline bool may_hard_irq_enable(void)
static inline bool should_hard_irq_enable(void)
{
return false;
}
@ -408,6 +483,10 @@ static inline void do_hard_irq_enable(void)
BUILD_BUG();
}
static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}


@ -7,7 +7,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
extern struct irq_domain *i8259_get_host(void);
struct irq_domain *__init i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */


@ -3,20 +3,21 @@
#define _ASM_POWERPC_INST_H
#include <asm/ppc-opcode.h>
#ifdef CONFIG_PPC64
#include <asm/reg.h>
#include <asm/disassemble.h>
#include <asm/uaccess.h>
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
long __gui_ret; \
u32 __user *__gui_ptr = (u32 __user *)ptr; \
struct ppc_inst __gui_inst; \
ppc_inst_t __gui_inst; \
unsigned int __prefix, __suffix; \
\
__chk_user_ptr(ptr); \
__gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
if (IS_ENABLED(CONFIG_PPC64) && (__prefix >> 26) == OP_PREFIX) { \
__gui_ret = gu_op(__suffix, __gui_ptr + 1); \
__gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
@ -27,13 +28,6 @@
} \
__gui_ret; \
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
__chk_user_ptr(ptr); \
gu_op((dest).val, (u32 __user *)(ptr)); \
})
#endif /* CONFIG_PPC64 */
#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
@ -43,44 +37,46 @@
* Instruction data type for POWER
*/
struct ppc_inst {
u32 val;
#ifdef CONFIG_PPC64
u32 suffix;
#endif
} __packed;
static inline u32 ppc_inst_val(struct ppc_inst x)
#if defined(CONFIG_PPC64) || defined(__CHECKER__)
static inline u32 ppc_inst_val(ppc_inst_t x)
{
return x.val;
}
static inline int ppc_inst_primary_opcode(struct ppc_inst x)
#define ppc_inst(x) ((ppc_inst_t){ .val = (x) })
#else
static inline u32 ppc_inst_val(ppc_inst_t x)
{
return x;
}
#define ppc_inst(x) (x)
#endif
static inline int ppc_inst_primary_opcode(ppc_inst_t x)
{
return ppc_inst_val(x) >> 26;
}
#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
#define ppc_inst_prefix(x, y) ((ppc_inst_t){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
static inline u32 ppc_inst_suffix(ppc_inst_t x)
{
return x.suffix;
}
#else
#define ppc_inst_prefix(x, y) ppc_inst(x)
#define ppc_inst_prefix(x, y) ((void)y, ppc_inst(x))
static inline u32 ppc_inst_suffix(struct ppc_inst x)
static inline u32 ppc_inst_suffix(ppc_inst_t x)
{
return 0;
}
#endif /* CONFIG_PPC64 */
static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
static inline ppc_inst_t ppc_inst_read(const u32 *ptr)
{
if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
return ppc_inst_prefix(*ptr, *(ptr + 1));
@ -88,17 +84,17 @@ static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
return ppc_inst(*ptr);
}
static inline bool ppc_inst_prefixed(struct ppc_inst x)
static inline bool ppc_inst_prefixed(ppc_inst_t x)
{
return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
static inline ppc_inst_t ppc_inst_swab(ppc_inst_t x)
{
return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
static inline bool ppc_inst_equal(ppc_inst_t x, ppc_inst_t y)
{
if (ppc_inst_val(x) != ppc_inst_val(y))
return false;
@ -107,7 +103,7 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
static inline int ppc_inst_len(struct ppc_inst x)
static inline int ppc_inst_len(ppc_inst_t x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
}
@ -118,14 +114,14 @@ static inline int ppc_inst_len(struct ppc_inst x)
*/
static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
ppc_inst_t tmp;
tmp = ppc_inst_read(value);
return (void *)location + ppc_inst_len(tmp);
}
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
static inline unsigned long ppc_inst_as_ulong(ppc_inst_t x)
{
if (IS_ENABLED(CONFIG_PPC32))
return ppc_inst_val(x);
@ -135,9 +131,17 @@ static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
}
static inline void ppc_inst_write(u32 *ptr, ppc_inst_t x)
{
if (!ppc_inst_prefixed(x))
*ptr = ppc_inst_val(x);
else
*(u64 *)ptr = ppc_inst_as_ulong(x);
}
#define PPC_INST_STR_LEN sizeof("00000000 00000000")
static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_inst x)
static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x)
{
if (ppc_inst_prefixed(x))
sprintf(str, "%08x %08x", ppc_inst_val(x), ppc_inst_suffix(x));
@ -154,6 +158,27 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
{
unsigned int val, suffix;
if (unlikely(!is_kernel_addr((unsigned long)src)))
return -ERANGE;
/* See https://github.com/ClangBuiltLinux/linux/issues/1521 */
#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000
val = suffix = 0;
#endif
__get_kernel_nofault(&val, src, u32, Efault);
if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
__get_kernel_nofault(&suffix, src + 1, u32, Efault);
*inst = ppc_inst_prefix(val, suffix);
} else {
*inst = ppc_inst(val);
}
return 0;
Efault:
return -EFAULT;
}
#endif /* _ASM_POWERPC_INST_H */
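For orientation, here is a minimal sketch (illustrative only, not part of the patch) of a caller that goes solely through the accessors defined above, so the same source builds whether ppc_inst_t is a bare u32 (32-bit) or the packed prefix/suffix struct (64-bit). The dump_insn_at() helper and its messages are made up for illustration; the accessors are the ones from this header.

#include <linux/printk.h>
#include <asm/inst.h>

/* Hypothetical caller, for illustration only. */
static void dump_insn_at(u32 *ip)
{
	ppc_inst_t insn = ppc_inst_read(ip);

	if (ppc_inst_prefixed(insn))
		pr_info("prefixed insn %08x %08x, len %d\n",
			ppc_inst_val(insn), ppc_inst_suffix(insn),
			ppc_inst_len(insn));
	else
		pr_info("insn %08x, primary opcode %d, len %d\n",
			ppc_inst_val(insn), ppc_inst_primary_opcode(insn),
			ppc_inst_len(insn));
}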


@ -97,6 +97,11 @@ static inline void srr_regs_clobbered(void)
local_paca->hsrr_valid = 0;
}
#else
static inline unsigned long search_kernel_restart_table(unsigned long addr)
{
return 0;
}
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
return false;
@ -139,39 +144,68 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();
if (user_mode(regs)) {
kuep_lock();
account_cpu_user_entry();
} else {
if (user_mode(regs))
kuap_lock();
else
kuap_save_and_lock(regs);
}
if (user_mode(regs))
account_cpu_user_entry();
#endif
#ifdef CONFIG_PPC64
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
bool trace_enable = false;
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_enable = true;
} else {
irq_soft_mask_set(IRQS_ALL_DISABLED);
}
/*
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
* Asynchronous interrupts get here with HARD_DIS set (see below), so
* this enables MSR[EE] for synchronous interrupts. IRQs remain
* soft-masked. The interrupt handler may later call
* interrupt_cond_local_irq_enable() to achieve a regular process
* context.
*/
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!(regs->msr & MSR_EE));
__hard_irq_enable();
} else {
__hard_RI_enable();
}
/* Do this when RI=1 because it can cause SLB faults */
if (trace_enable)
trace_hardirqs_off();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
if (user_mode(regs)) {
kuap_lock();
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
account_cpu_user_entry();
account_stolen_time();
} else {
kuap_save_and_lock(regs);
/*
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
BUG_ON(is_implicit_soft_masked(regs));
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(is_implicit_soft_masked(regs));
}
#ifdef CONFIG_PPC_BOOK3S
/* Move this under a debugging check */
if (arch_irq_disabled_regs(regs))
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
arch_irq_disabled_regs(regs))
BUG_ON(search_kernel_restart_table(regs->nip));
#endif
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
@ -200,13 +234,20 @@ static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC64
/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
interrupt_enter_prepare(regs, state);
#ifdef CONFIG_PPC_BOOK3S_64
/*
* RI=1 is set by interrupt_enter_prepare, so this thread flags access
* has to come afterward (it can cause SLB faults).
*/
if (cpu_has_feature(CPU_FTR_CTRL) &&
!test_thread_local_flags(_TLF_RUNLATCH))
__ppc64_runlatch_on();
#endif
interrupt_enter_prepare(regs, state);
irq_enter();
}
@ -276,6 +317,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
regs->softe = IRQS_ALL_DISABLED;
}
__hard_RI_enable();
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
@ -373,6 +416,8 @@ interrupt_handler long func(struct pt_regs *regs) \
{ \
long ret; \
\
__hard_RI_enable(); \
\
ret = ____##func (regs); \
\
return ret; \
@ -564,7 +609,7 @@ DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
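As a usage note (illustrative sketch, not from the patch): handler bodies pair with these declarations via the DEFINE_INTERRUPT_HANDLER() counterparts, which generate a wrapper like the one shown in the earlier hunk; with this series the wrapper path now performs __hard_RI_enable() before the body runs, so the body can assume MSR[RI] is set. The handler name below is made up, and the macro expansion is assumed to keep its usual shape.

#include <linux/printk.h>
#include <asm/interrupt.h>

DECLARE_INTERRUPT_HANDLER(example_interrupt);	/* hypothetical handler */

DEFINE_INTERRUPT_HANDLER(example_interrupt)
{
	/* Runs with MSR[RI] already enabled by the generated wrapper. */
	pr_err("example interrupt at nip 0x%lx\n", regs->nip);
}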


@ -275,7 +275,7 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs);
extern void iommu_init_early_pSeries(void);
void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);


@ -65,7 +65,7 @@ enum ipic_mcp_irq {
IPIC_MCP_MU = 7,
};
extern void ipic_set_default_priority(void);
void __init ipic_set_default_priority(void);
extern u32 ipic_get_mcp_status(void);
extern void ipic_clear_mcp_status(u32 mask);


@ -36,7 +36,7 @@ extern int distribute_irqs;
struct pt_regs;
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#ifdef CONFIG_BOOKE_OR_40x
/*
* Per-cpu stacks for handling critical, debug and machine check
* level interrupts.


@ -84,7 +84,7 @@ extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void crash_kexec_secondary(struct pt_regs *regs);
extern int overlaps_crashkernel(unsigned long start, unsigned long size);
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);


@ -14,6 +14,10 @@
#include <asm/nohash/32/kup-8xx.h>
#endif
#ifdef CONFIG_BOOKE_OR_40x
#include <asm/nohash/kup-booke.h>
#endif
#ifdef CONFIG_PPC_BOOK3S_32
#include <asm/book3s/32/kup.h>
#endif
@ -32,34 +36,29 @@ extern bool disable_kuap;
#include <linux/pgtable.h>
#ifdef CONFIG_PPC_KUEP
void setup_kup(void);
void setup_kuep(bool disabled);
#else
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
static __always_inline bool kuap_is_disabled(void) { return true; }
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
static inline void kuap_assert_locked(void) { }
static inline void kuap_save_and_lock(struct pt_regs *regs) { }
static inline void __kuap_assert_locked(void) { }
static inline void __kuap_lock(void) { }
static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
static inline void kuap_user_restore(struct pt_regs *regs) { }
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
static inline unsigned long kuap_get_and_assert_locked(void)
static inline unsigned long __kuap_get_and_assert_locked(void)
{
return 0;
}
@ -70,20 +69,99 @@ static inline unsigned long kuap_get_and_assert_locked(void)
* platforms.
*/
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
static inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void __prevent_user_access(unsigned long dir) { }
static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
static inline void __restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */
static __always_inline void setup_kup(void)
static __always_inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
setup_kuep(disable_kuep);
setup_kuap(disable_kuap);
if (kuap_is_disabled())
return false;
return __bad_kuap_fault(regs, address, is_write);
}
static __always_inline void kuap_assert_locked(void)
{
if (kuap_is_disabled())
return;
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
__kuap_get_and_assert_locked();
}
static __always_inline void kuap_lock(void)
{
if (kuap_is_disabled())
return;
__kuap_lock();
}
static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
{
if (kuap_is_disabled())
return;
__kuap_save_and_lock(regs);
}
static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
if (kuap_is_disabled())
return;
__kuap_kernel_restore(regs, amr);
}
static __always_inline unsigned long kuap_get_and_assert_locked(void)
{
if (kuap_is_disabled())
return 0;
return __kuap_get_and_assert_locked();
}
#ifndef CONFIG_PPC_BOOK3S_64
static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
if (kuap_is_disabled())
return;
__allow_user_access(to, from, size, dir);
}
static __always_inline void prevent_user_access(unsigned long dir)
{
if (kuap_is_disabled())
return;
__prevent_user_access(dir);
}
static __always_inline unsigned long prevent_user_access_return(void)
{
if (kuap_is_disabled())
return 0;
return __prevent_user_access_return();
}
static __always_inline void restore_user_access(unsigned long flags)
{
if (kuap_is_disabled())
return;
__restore_user_access(flags);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();

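Worth spelling out once (illustrative sketch, not from the patch): the save/restore pairing these wrappers guard. Code that must temporarily block user access in a region where it may already be open captures the current state with prevent_user_access_return() and puts it back with restore_user_access(); thanks to the kuap_is_disabled() static-key check above, both collapse to no-ops when KUAP is disabled at boot. The helper name below is hypothetical.

#include <asm/kup.h>

/* Hypothetical helper, for illustration only. */
static void example_block_user_access_region(void)
{
	unsigned long flags;

	flags = prevent_user_access_return();	/* save state, block user access */

	/* ... code that must not reach userspace runs here ... */

	restore_user_access(flags);		/* restore the previous state */
}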

@ -79,6 +79,7 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER 0x1980
#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00


@ -406,6 +406,12 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
}
static inline bool is_kvmppc_resume_guest(int r)
{
return (r == RESUME_GUEST || r == RESUME_GUEST_NV);


@ -44,7 +44,6 @@ struct kvm_nested_guest {
struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next;
cpumask_t need_tlb_flush;
cpumask_t cpu_in_guest;
short prev_cpu[NR_CPUS];
u8 radix; /* is this nested guest radix */
};
@ -154,7 +153,9 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif


@ -16,7 +16,7 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
int check_kvm_guest(void);
int __init check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
static inline int check_kvm_guest(void) { return 0; }


@ -287,7 +287,6 @@ struct kvm_arch {
u32 online_vcores;
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
cpumask_t cpu_in_guest;
u8 radix;
u8 fwnmi_enabled;
u8 secure_guest;
@ -579,6 +578,10 @@ struct kvm_vcpu_arch {
ulong cfar;
ulong ppr;
u32 pspb;
u8 load_ebb;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
u8 load_tm;
#endif
ulong fscr;
ulong shadow_fscr;
ulong ebbhr;
@ -741,7 +744,7 @@ struct kvm_vcpu_arch {
struct hrtimer dec_timer;
u64 dec_jiffies;
u64 dec_expires;
u64 dec_expires; /* Relative to guest timebase. */
unsigned long pending_exceptions;
u8 ceded;
u8 prodded;


@ -552,8 +552,7 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
struct kvm_nested_guest *nested);
extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);
#else
static inline void __init kvm_cma_reserve(void)
@ -760,6 +759,7 @@ void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,


@ -235,8 +235,6 @@ extern struct machdep_calls *machine_id;
machine_id == &mach_##name; \
})
extern void probe_machine(void);
#ifdef CONFIG_PPC_PMAC
/*
* Power macintoshes have either a CUDA, PMU or SMU controlling


@ -157,7 +157,7 @@ DECLARE_PER_CPU(int, next_tlbcam_idx);
enum {
MMU_FTRS_POSSIBLE =
#if defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_BOOK3S_604)
#if defined(CONFIG_PPC_BOOK3S_604)
MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
@ -184,15 +184,18 @@ enum {
MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
MMU_FTR_KERNEL_RO |
#ifdef CONFIG_PPC_64S_HASH_MMU
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
MMU_FTR_GTSE |
#endif /* CONFIG_PPC_RADIX_MMU */
#endif
#ifdef CONFIG_PPC_KUAP
MMU_FTR_BOOK3S_KUAP |
#endif /* CONFIG_PPC_KUAP */
@ -224,6 +227,13 @@ enum {
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
/* BOOK3S_64 options */
#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_RADIX
#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
#endif
#ifndef MMU_FTRS_ALWAYS
#define MMU_FTRS_ALWAYS 0
#endif
@ -329,7 +339,7 @@ static __always_inline bool radix_enabled(void)
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
static inline bool early_radix_enabled(void)
static __always_inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}


@ -71,10 +71,11 @@ static inline void switch_mmu_context(struct mm_struct *prev,
}
extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
unsigned long ea)
{
@ -100,6 +101,7 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
return true;
return false;
}
#endif
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,


@ -472,7 +472,7 @@ extern int mpic_cpu_get_priority(void);
extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
void __init mpic_request_ipis(void);
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);


@ -20,11 +20,12 @@ static __always_inline bool kuap_is_disabled(void)
return static_branch_unlikely(&disable_kuap_key);
}
static inline void kuap_save_and_lock(struct pt_regs *regs)
static inline void __kuap_lock(void)
{
if (kuap_is_disabled())
return;
}
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
@ -33,21 +34,15 @@ static inline void kuap_user_restore(struct pt_regs *regs)
{
}
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, regs->kuap);
}
static inline unsigned long kuap_get_and_assert_locked(void)
static inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap;
if (kuap_is_disabled())
return MD_APG_INIT;
kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
@ -56,36 +51,21 @@ static inline unsigned long kuap_get_and_assert_locked(void)
return kuap;
}
static inline void kuap_assert_locked(void)
static inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
kuap_get_and_assert_locked();
}
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
static inline void prevent_user_access(unsigned long dir)
static inline void __prevent_user_access(unsigned long dir)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags;
if (kuap_is_disabled())
return MD_APG_INIT;
flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@ -93,20 +73,14 @@ static inline unsigned long prevent_user_access_return(void)
return flags;
}
static inline void restore_user_access(unsigned long flags)
static inline void __restore_user_access(unsigned long flags)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
if (kuap_is_disabled())
return false;
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}


@ -113,7 +113,6 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */


@ -39,12 +39,10 @@
* 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
* 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
* 2 => User => 11 (all accesses performed according as user iaw page definition)
* 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
* => 10 (all accesses performed according to swapped page definition) for KUEP
* 3 => User+Accessed => 10 (all accesses performed according to swapped page definition) for KUEP
* 4-15 => Not Used
*/
#define MI_APG_INIT 0xdc000000
#define MI_APG_KUEP 0xde000000
#define MI_APG_INIT 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in


@ -313,6 +313,12 @@ extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
extern unsigned int name; \
__patch_exception((exc), (unsigned long)&name); \
} while (0)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */


@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_KUP_BOOKE_H_
#define _ASM_POWERPC_KUP_BOOKE_H_
#include <asm/bug.h>
#ifdef CONFIG_PPC_KUAP
#ifdef __ASSEMBLY__
.macro kuap_check_amr gpr1, gpr2
.endm
#else
#include <linux/jump_label.h>
#include <linux/sched.h>
#include <asm/reg.h>
extern struct static_key_false disable_kuap_key;
static __always_inline bool kuap_is_disabled(void)
{
return static_branch_unlikely(&disable_kuap_key);
}
static inline void __kuap_lock(void)
{
mtspr(SPRN_PID, 0);
isync();
}
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
regs->kuap = mfspr(SPRN_PID);
mtspr(SPRN_PID, 0);
isync();
}
static inline void kuap_user_restore(struct pt_regs *regs)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_PID, current->thread.pid);
/* Context synchronisation is performed by rfi */
}
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (regs->kuap)
mtspr(SPRN_PID, current->thread.pid);
/* Context synchronisation is performed by rfi */
}
static inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap = mfspr(SPRN_PID);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap);
return kuap;
}
static inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
mtspr(SPRN_PID, current->thread.pid);
isync();
}
static inline void __prevent_user_access(unsigned long dir)
{
mtspr(SPRN_PID, 0);
isync();
}
static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags = mfspr(SPRN_PID);
mtspr(SPRN_PID, 0);
isync();
return flags;
}
static inline void __restore_user_access(unsigned long flags)
{
if (flags) {
mtspr(SPRN_PID, current->thread.pid);
isync();
}
}
static inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return !regs->kuap;
}
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_PPC_KUAP */
#endif /* _ASM_POWERPC_KUP_BOOKE_H_ */


@ -1094,6 +1094,7 @@ enum {
OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */
OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */
OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */
OPAL_XIVE_IRQ_STORE_EOI2 = 0x00000040,
};
/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */


@ -314,7 +314,7 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
extern void opal_configure_cores(void);
void __init opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);


@ -97,7 +97,9 @@ struct paca_struct {
/* this becomes non-zero. */
u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_shadow *slb_shadow_ptr;
#endif
struct dtl_entry *dispatch_log;
struct dtl_entry *dispatch_log_end;
#endif
@ -110,6 +112,7 @@ struct paca_struct {
/* used for most interrupts/exceptions */
u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
#ifdef CONFIG_PPC_64S_HASH_MMU
/* SLB related definitions */
u16 vmalloc_sllp;
u8 slb_cache_ptr;
@ -120,6 +123,7 @@ struct paca_struct {
u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */
u32 slb_kern_bitmap;
u32 slb_cache[SLB_CACHE_ENTRIES];
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_BOOK3E
@ -149,6 +153,7 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
#ifdef CONFIG_PPC_64S_HASH_MMU
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
@ -156,6 +161,7 @@ struct paca_struct {
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
#endif
#endif
#endif
/*
@ -268,9 +274,11 @@ struct paca_struct {
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Capture SLB related old contents in MCE handler. */
struct slb_entry *mce_faulty_slbs;
u16 slb_save_cache_ptr;
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_STACKPROTECTOR
unsigned long canary;


@ -48,7 +48,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#ifdef CONFIG_PCI
extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#endif


@ -98,7 +98,7 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
extern int register_power_pmu(struct power_pmu *);
int __init register_power_pmu(struct power_pmu *pmu);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);


@ -249,6 +249,7 @@
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LSWI 0x7c0004aa
@ -393,6 +394,7 @@
(0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
(0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
#define PPC_RAW_TLBIEL_v205(rb, l) (0x7c000224 | ___PPC_RB(rb) | (l << 21))
#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
@ -566,6 +568,8 @@
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
#define PPC_RAW_EIEIO() (0x7c0006ac)
#define PPC_RAW_BRANCH(addr) (PPC_INST_BRANCH | ((addr) & 0x03fffffc))
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
@ -575,6 +579,7 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))
@ -602,6 +607,7 @@
stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
#define PPC_TLBIEL(rb,rs,ric,prs,r) \
stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b))
#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b))


@ -16,30 +16,41 @@
#define SZL (BITS_PER_LONG/8)
/*
* This expands to a sequence of operations with reg incrementing from
* start to end inclusive, of this form:
*
* op reg, (offset + (width * reg))(base)
*
* Note that offset is not the offset of the first operation unless start
* is zero (or width is zero).
*/
.macro OP_REGS op, width, start, end, base, offset
.Lreg=\start
.rept (\end - \start + 1)
\op .Lreg, \offset + \width * .Lreg(\base)
.Lreg=.Lreg+1
.endr
.endm
/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
#ifdef __powerpc64__
#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base)
#define REST_NVGPRS(base) REST_GPRS(14, 31, base)
#else
#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); REST_10GPRS(22, base)
#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base)
#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
#endif
#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
#define REST_GPR(n, base) REST_GPRS(n, n, base)
#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)


@ -157,8 +157,12 @@ struct thread_struct {
#ifdef CONFIG_PPC_BOOK3S_32
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
unsigned long lr, ctr;
unsigned long sr0;
#endif
#endif /* CONFIG_PPC32 */
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
unsigned long pid; /* value written in PID reg. at interrupt exit */
#endif
/* Debug Registers */
struct debug_reg debug;
#ifdef CONFIG_PPC_FPU_REGS
@ -191,8 +195,10 @@ struct thread_struct {
int used_vsr; /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
struct_group(spe,
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
);
unsigned long spefscr; /* SPE & eFP status */
unsigned long spefscr_last; /* SPEFSCR value on last prctl
call or trap return */
@ -276,6 +282,12 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
#ifdef CONFIG_PPC_BOOK3S_32
#define SR0_INIT .sr0 = IS_ENABLED(CONFIG_PPC_KUEP) ? SR_NX : 0,
#else
#define SR0_INIT
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
#define INIT_THREAD { \
.ksp = INIT_SP, \
@ -283,6 +295,7 @@ struct thread_struct {
.kuap = ~0UL, /* KUAP_NONE */ \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
SR0_INIT \
}
#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
@ -290,6 +303,7 @@ struct thread_struct {
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
SR0_INIT \
}
#else
#define INIT_THREAD { \


@ -291,7 +291,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
static inline bool cpu_has_msr_ri(void)
{
return !IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x);
return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
}
static inline bool regs_is_unrecoverable(struct pt_regs *regs)


@ -18,9 +18,9 @@
#include <asm/feature-fixups.h>
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#ifdef CONFIG_BOOKE_OR_40x
#include <asm/reg_booke.h>
#endif /* CONFIG_BOOKE || CONFIG_40x */
#endif
#ifdef CONFIG_FSL_EMB_PERFMON
#include <asm/reg_fsl_emb.h>
@ -1366,6 +1366,18 @@
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#if defined(CONFIG_PPC64) || defined(__CHECKER__)
typedef struct {
u32 val;
#ifdef CONFIG_PPC64
u32 suffix;
#endif
} __packed ppc_inst_t;
#else
typedef u32 ppc_inst_t;
#endif
#define mfmsr() ({unsigned long rval; \
asm volatile("mfmsr %0" : "=r" (rval) : \
: "memory"); rval;})


@ -264,7 +264,7 @@ extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
extern unsigned int rtas_busy_delay_time(int status);
extern unsigned int rtas_busy_delay(int status);
bool rtas_busy_delay(int status);
extern int early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data);


@ -25,16 +25,16 @@ extern char start_virt_trampolines[];
extern char end_virt_trampolines[];
#endif
/*
* This assumes the kernel is never compiled -mcmodel=small or
* the total .toc is always less than 64k.
*/
static inline unsigned long kernel_toc_addr(void)
{
/* Defined by the linker, see vmlinux.lds.S */
extern unsigned long __toc_start;
unsigned long toc_ptr;
/*
* The TOC register (r2) points 32kB into the TOC, so that 64kB of
* the TOC can be addressed using a single machine instruction.
*/
return (unsigned long)(&__toc_start) + 0x8000UL;
asm volatile("mr %0, 2" : "=r" (toc_ptr));
return toc_ptr;
}
static inline int overlaps_interrupt_vector_text(unsigned long start,


@ -9,7 +9,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
@ -32,7 +31,7 @@ void setup_panic(void);
extern bool pseries_enable_reloc_on_exc(void);
extern void pseries_disable_reloc_on_exc(void);
extern void pseries_big_endian_exceptions(void);
extern void pseries_little_endian_exceptions(void);
void __init pseries_little_endian_exceptions(void);
#else
static inline bool pseries_enable_reloc_on_exc(void) { return false; }
static inline void pseries_disable_reloc_on_exc(void) {}
@ -55,7 +54,7 @@ void setup_entry_flush(bool enable);
void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
void __init setup_barrier_nospec(void);
#else
static inline void setup_barrier_nospec(void) { }
#endif
@ -71,11 +70,11 @@ static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void);
void __init setup_spectre_v2(void);
#else
static inline void setup_spectre_v2(void) {}
#endif
void do_btb_flush_fixups(void);
void __init do_btb_flush_fixups(void);
#endif /* !__ASSEMBLY__ */


@ -456,7 +456,7 @@ extern void smu_poll(void);
/*
* Init routine, presence check....
*/
extern int smu_init(void);
int __init smu_init(void);
extern int smu_present(void);
struct platform_device;
extern struct platform_device *smu_get_ofdev(void);


@ -145,7 +145,7 @@ union vsx_reg {
* otherwise.
*/
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
struct ppc_inst instr);
ppc_inst_t instr);
/*
* Emulate an instruction that can be executed just by updating
@ -162,7 +162,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
* 0 if it could not be emulated, or -1 for an instruction that
* should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
*/
extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);
int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
/*
* Emulate a load or store instruction by reading/writing the


@ -112,6 +112,9 @@ static inline void clear_task_ebb(struct task_struct *t)
#endif
}
void kvmppc_save_user_regs(void);
void kvmppc_save_current_sprs(void);
extern int set_thread_tidr(struct task_struct *t);
#endif /* _ASM_POWERPC_SWITCH_TO_H */


@ -44,11 +44,7 @@
*/
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
#define TASK_SIZE_OF(tsk) \
(test_tsk_thread_flag(tsk, TIF_32BIT) ? TASK_SIZE_USER32 : \
TASK_SIZE_USER64)
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_SIZE (is_32bit_task() ? TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))


@ -18,6 +18,8 @@
#include <asm/vdso/timebase.h>
/* time.c */
extern u64 decrementer_max;
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
@ -97,20 +99,17 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
: "i" (offsetof(struct paca_struct, irq_work_pending)));
return x;
}
#endif
DECLARE_PER_CPU(u64, decrementers_next_tb);
static inline u64 timer_get_next_tb(void)
{
return __this_cpu_read(decrementers_next_tb);
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void timer_rearm_host_dec(u64 now);
#endif
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);


@ -23,14 +23,14 @@ extern void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
extern void udbg_progress(char *s, unsigned short hex);
extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
void __init udbg_uart_init_pio(unsigned long port, unsigned int stride);
extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
extern unsigned int udbg_probe_uart_speed(unsigned int clock);
void __init udbg_uart_setup(unsigned int speed, unsigned int clock);
unsigned int __init udbg_probe_uart_speed(unsigned int clock);
struct device_node;
extern void udbg_scc_init(int force_scc);
void __init udbg_scc_init(int force_scc);
extern int udbg_adb_init(int force_btext);
extern void udbg_adb_init_early(void);


@ -11,7 +11,6 @@
#include <linux/notifier.h>
#include <asm/probes.h>
#include <asm/inst.h>
typedef ppc_opcode_t uprobe_opcode_t;


@ -38,13 +38,13 @@ static inline int icp_native_init(void) { return -ENODEV; }
/* PAPR ICP */
#ifdef CONFIG_PPC_ICP_HV
extern int icp_hv_init(void);
int __init icp_hv_init(void);
#else
static inline int icp_hv_init(void) { return -ENODEV; }
#endif
#ifdef CONFIG_PPC_POWERNV
extern int icp_opal_init(void);
int __init icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }


@ -12,7 +12,7 @@
#ifdef CONFIG_XMON
extern void xmon_setup(void);
extern void xmon_register_spus(struct list_head *list);
void __init xmon_register_spus(struct list_head *list);
struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern irqreturn_t xmon_irq(int, void *);


@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)


@ -105,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = {
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
struct ppc_inst ppc_instr)
ppc_inst_t ppc_instr)
{
union {
u64 ll;
@ -300,7 +300,7 @@ Efault_write:
int fix_alignment(struct pt_regs *regs)
{
struct ppc_inst instr;
ppc_inst_t instr;
struct instruction_op op;
int r, type;


@ -54,7 +54,7 @@
#endif
#ifdef CONFIG_PPC32
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#ifdef CONFIG_BOOKE_OR_40x
#include "head_booke.h"
#endif
#endif
@ -139,6 +139,7 @@ int main(void)
OFFSET(THR11, thread_struct, r11);
OFFSET(THLR, thread_struct, lr);
OFFSET(THCTR, thread_struct, ctr);
OFFSET(THSR0, thread_struct, sr0);
#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
@ -218,10 +219,12 @@ int main(void)
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_64S_HASH_MMU
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
#endif
OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);


@ -161,7 +161,7 @@ void btext_map(void)
boot_text_mapped = 1;
}
static int btext_initialize(struct device_node *np)
static int __init btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout)
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
if (rc == 0)
if (rc == 0) {
of_node_put(np);
break;
}
}
return rc;
}
@ -290,7 +292,7 @@ void btext_update_display(unsigned long phys, int width, int height,
}
EXPORT_SYMBOL(btext_update_display);
void btext_clearscreen(void)
void __init btext_clearscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@ -308,7 +310,7 @@ void btext_clearscreen(void)
rmci_maybe_off();
}
void btext_flushscreen(void)
void __init btext_flushscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@ -327,7 +329,7 @@ void btext_flushscreen(void)
__asm__ __volatile__ ("sync" ::: "memory");
}
void btext_flushline(void)
void __init btext_flushline(void)
{
unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@ -542,7 +544,7 @@ void btext_drawstring(const char *c)
btext_drawchar(*c++);
}
void btext_drawtext(const char *c, unsigned int len)
void __init btext_drawtext(const char *c, unsigned int len)
{
if (!boot_text_mapped)
return;
@ -550,7 +552,7 @@ void btext_drawtext(const char *c, unsigned int len)
btext_drawchar(*c++);
}
void btext_drawhex(unsigned long v)
void __init btext_drawhex(unsigned long v)
{
if (!boot_text_mapped)
return;


@ -710,7 +710,7 @@ static struct kobj_attribute cache_shared_cpu_list_attr =
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
* does this automatically via kobj_type->default_attrs. This is the
* does this automatically via kobj_type->default_groups. This is the
* minimum data required to uniquely identify a cache.
*/
static struct attribute *cache_index_default_attrs[] = {
@ -720,6 +720,7 @@ static struct attribute *cache_index_default_attrs[] = {
&cache_shared_cpu_list_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache_index_default);
/* Attributes which should be created if the cache device node has the
* right properties -- see cacheinfo_create_index_opt_attrs
@ -738,7 +739,7 @@ static const struct sysfs_ops cache_index_ops = {
static struct kobj_type cache_index_type = {
.release = cache_index_release,
.sysfs_ops = &cache_index_ops,
.default_attrs = cache_index_default_attrs,
.default_groups = cache_index_default_groups,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)

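The general shape of this default_attrs to default_groups conversion, as an illustrative sketch with made-up example_* names (not from the patch): an attribute array named <prefix>_attrs plus an ATTRIBUTE_GROUPS(<prefix>) invocation produces a <prefix>_groups array, which is what kobj_type now consumes through .default_groups.

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* All example_* names are hypothetical, for illustration only. */
static ssize_t example_foo_show(struct kobject *kobj, struct kobj_attribute *attr,
				char *buf)
{
	return sysfs_emit(buf, "foo\n");
}
static struct kobj_attribute example_foo_attr = __ATTR_RO(example_foo);

static struct attribute *example_attrs[] = {
	&example_foo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* generates example_groups from example_attrs */

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,
};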

@ -109,7 +109,7 @@ static void init_PMU_HV_ISA207(void)
static void init_PMU(void)
{
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
@ -123,7 +123,7 @@ static void init_PMU_ISA31(void)
{
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
/*
@ -137,6 +137,7 @@ void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t)
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
@ -150,6 +151,7 @@ void __restore_cpu_power7(void)
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
@ -164,6 +166,7 @@ void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t)
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
@ -184,6 +187,7 @@ void __restore_cpu_power8(void)
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
@ -202,6 +206,7 @@ void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@ -223,6 +228,7 @@ void __restore_cpu_power9(void)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@ -242,6 +248,7 @@ void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@ -264,6 +271,7 @@ void __restore_cpu_power10(void)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);


@ -27,7 +27,8 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
may_hard_irq_enable();
if (should_hard_irq_enable())
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);


@ -80,6 +80,7 @@ static void __restore_cpu_cpufeatures(void)
mtspr(SPRN_LPCR, system_registers.lpcr);
if (hv_mode) {
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
mtspr(SPRN_PCR, system_registers.pcr);
}
@ -216,6 +217,7 @@ static int __init feat_enable_hv(struct dt_cpu_feature *f)
}
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_LPES0; /* HV external interrupts */
@ -271,6 +273,9 @@ static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
u64 lpcr;
if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
return 0;
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_ISL;
@ -290,6 +295,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
u64 lpcr;
if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
return 0;
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
mtspr(SPRN_LPCR, lpcr);
@ -303,15 +311,15 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
return 0;
cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
#endif
return 0;
}
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
@ -336,7 +344,7 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
return 1;
}
static void hfscr_pmu_enable(void)
static void __init hfscr_pmu_enable(void)
{
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= PPC_BIT(60);
@ -351,7 +359,7 @@ static void init_pmu_power8(void)
}
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
mtspr(SPRN_MMCRS, 0);
@ -390,7 +398,7 @@ static void init_pmu_power9(void)
mtspr(SPRN_MMCRC, 0);
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
@ -426,7 +434,7 @@ static void init_pmu_power10(void)
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)


@ -280,7 +280,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);
void eeh_cache_debugfs_init(void)
void __init eeh_cache_debugfs_init(void)
{
debugfs_create_file_unsafe("eeh_address_cache", 0400,
arch_debugfs_dir, NULL,


@ -905,18 +905,19 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
#endif /* CONFIG_STACKTRACE */
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
edev->mode &= ~EEH_DEV_NO_HANDLER;
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
if (pe->freeze_count > eeh_max_freezes) {
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
result = PCI_ERS_RESULT_DISCONNECT;
}
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
edev->mode &= ~EEH_DEV_NO_HANDLER;
goto recover_failed;
}
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@ -928,39 +929,38 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
* the error. Override the result if necessary to have partially
* hotplug for this case.
*/
if (result != PCI_ERS_RESULT_DISCONNECT) {
pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
pe->freeze_count, eeh_max_freezes);
pr_info("EEH: Notify device drivers to shutdown\n");
eeh_set_channel_state(pe, pci_channel_io_frozen);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(IO frozen)", pe,
eeh_report_error, &result);
if ((pe->type & EEH_PE_PHB) &&
result != PCI_ERS_RESULT_NONE &&
result != PCI_ERS_RESULT_NEED_RESET)
result = PCI_ERS_RESULT_NEED_RESET;
}
pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
pe->freeze_count, eeh_max_freezes);
pr_info("EEH: Notify device drivers to shutdown\n");
eeh_set_channel_state(pe, pci_channel_io_frozen);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(IO frozen)", pe,
eeh_report_error, &result);
if (result == PCI_ERS_RESULT_DISCONNECT)
goto recover_failed;
/*
* Error logged on a PHB are always fences which need a full
* PHB reset to clear so force that to happen.
*/
if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
*/
if (result != PCI_ERS_RESULT_DISCONNECT) {
rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
pr_warn("EEH: Permanent failure\n");
result = PCI_ERS_RESULT_DISCONNECT;
}
rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
pr_warn("EEH: Permanent failure\n");
goto recover_failed;
}
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed.
*/
if (result != PCI_ERS_RESULT_DISCONNECT) {
pr_info("EEH: Collect temporary log\n");
eeh_slot_error_detail(pe, EEH_LOG_TEMP);
}
pr_info("EEH: Collect temporary log\n");
eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
@ -970,9 +970,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset with hotplug activity\n");
rc = eeh_reset_device(pe, bus, NULL, false);
if (rc) {
pr_warn("%s: Unable to reset, err=%d\n",
__func__, rc);
result = PCI_ERS_RESULT_DISCONNECT;
pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
goto recover_failed;
}
}
@ -980,10 +979,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
if (rc < 0)
goto recover_failed;
if (rc < 0) {
result = PCI_ERS_RESULT_DISCONNECT;
} else if (rc) {
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
@ -991,15 +990,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_report_mmio_enabled, &result);
}
}
/* If all devices reported they can proceed, then re-enable DMA */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
if (rc < 0)
goto recover_failed;
if (rc < 0) {
result = PCI_ERS_RESULT_DISCONNECT;
} else if (rc) {
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
/*
@ -1017,16 +1014,15 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset without hotplug activity\n");
rc = eeh_reset_device(pe, bus, &rmv_data, true);
if (rc) {
pr_warn("%s: Cannot reset, err=%d\n",
__func__, rc);
result = PCI_ERS_RESULT_DISCONNECT;
} else {
result = PCI_ERS_RESULT_NONE;
eeh_set_channel_state(pe, pci_channel_io_normal);
eeh_set_irq_state(pe, true);
eeh_pe_report("slot_reset", pe, eeh_report_reset,
&result);
pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
goto recover_failed;
}
result = PCI_ERS_RESULT_NONE;
eeh_set_channel_state(pe, pci_channel_io_normal);
eeh_set_irq_state(pe, true);
eeh_pe_report("slot_reset", pe, eeh_report_reset,
&result);
}
if ((result == PCI_ERS_RESULT_RECOVERED) ||
@ -1054,45 +1050,47 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
pr_info("EEH: Recovery successful.\n");
} else {
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
goto out;
}
eeh_slot_error_detail(pe, EEH_LOG_PERM);
recover_failed:
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
/* Notify all devices that they're about to go down. */
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
/* Notify all devices that they're about to go down. */
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
/*
* Shut down the device drivers for good. We mark
* all removed devices correctly to avoid accessing
* their PCI config any more.
*/
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
/* The passed PE should no longer be used */
return;
}
/*
* Shut down the device drivers for good. We mark
* all removed devices correctly to avoid accessing
* their PCI config any more.
*/
if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
pci_hp_remove_devices(bus);
pci_unlock_rescan_remove();
/* The passed PE should no longer be used */
return;
}
out:
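The hunks above converge every EEH failure branch on the new recover_failed: label instead of setting PCI_ERS_RESULT_DISCONNECT in each branch. A minimal, standalone C sketch of that goto-based consolidation follows; the helper names are illustrative stand-ins, not the kernel's EEH functions.

/*
 * Illustrative only: each step that can fail jumps to one shared
 * cleanup label, mirroring the recover_failed: restructuring above.
 */
#include <stdio.h>

enum result { RECOVERED, DISCONNECT };

static int enable_io(void)  { return 0; }   /* pretend these may fail */
static int enable_dma(void) { return 0; }
static int reset_slot(void) { return 0; }

static enum result handle_event(void)
{
	if (enable_io() < 0)
		goto recover_failed;
	if (enable_dma() < 0)
		goto recover_failed;
	if (reset_slot())
		goto recover_failed;

	printf("recovery successful\n");
	return RECOVERED;

recover_failed:
	/* single place for the permanent-failure teardown */
	printf("unable to recover, marking device failed\n");
	return DISCONNECT;
}

int main(void)
{
	return handle_event() == RECOVERED ? 0 : 1;
}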


@ -73,13 +73,39 @@ prepare_transfer_to_handler:
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
.globl __kuep_lock
__kuep_lock:
lwz r9, THREAD+THSR0(r2)
update_user_segments_by_4 r9, r10, r11, r12
blr
__kuep_unlock:
lwz r9, THREAD+THSR0(r2)
rlwinm r9,r9,0,~SR_NX
update_user_segments_by_4 r9, r10, r11, r12
blr
.macro kuep_lock
bl __kuep_lock
.endm
.macro kuep_unlock
bl __kuep_unlock
.endm
#else
.macro kuep_lock
.endm
.macro kuep_unlock
.endm
#endif
.globl transfer_to_syscall
transfer_to_syscall:
stw r11, GPR1(r1)
stw r11, 0(r1)
mflr r12
stw r12, _LINK(r1)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#ifdef CONFIG_BOOKE_OR_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
#endif
lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
@ -90,10 +116,10 @@ transfer_to_syscall:
stw r12,8(r1)
stw r2,_TRAP(r1)
SAVE_GPR(0, r1)
SAVE_4GPRS(3, r1)
SAVE_2GPRS(7, r1)
SAVE_GPRS(3, 8, r1)
addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
kuep_lock
/* Calling convention has r9 = orig r0, r10 = regs */
addi r10,r1,STACK_FRAME_OVERHEAD
@ -110,6 +136,7 @@ ret_from_syscall:
cmplwi cr0,r5,0
bne- 2f
#endif /* CONFIG_PPC_47x */
kuep_unlock
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@ -139,7 +166,7 @@ syscall_exit_finish:
mtxer r5
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
REST_8GPRS(4,r1)
REST_GPRS(4, 11, r1)
lwz r12,GPR12(r1)
b 1b
@ -232,9 +259,9 @@ fast_exception_return:
beq 3f /* if not, we've got problems */
#endif
2: REST_4GPRS(3, r11)
2: REST_GPRS(3, 6, r11)
lwz r10,_CCR(r11)
REST_2GPRS(1, r11)
REST_GPRS(1, 2, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
@ -273,6 +300,7 @@ interrupt_return:
beq .Lkernel_interrupt_return
bl interrupt_exit_user_prepare
cmpwi r3,0
kuep_unlock
bne- .Lrestore_nvgprs
.Lfast_user_interrupt_return:
@ -298,16 +326,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
* the reliable stack unwinder later on. Clear it.
*/
stw r0,8(r1)
REST_4GPRS(7, r1)
REST_2GPRS(11, r1)
REST_GPRS(7, 12, r1)
mtcr r3
mtlr r4
mtctr r5
mtspr SPRN_XER,r6
REST_4GPRS(2, r1)
REST_GPR(6, r1)
REST_GPRS(2, 6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
rfi
@ -341,8 +367,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
lwz r6,_CCR(r1)
li r0,0
REST_4GPRS(7, r1)
REST_2GPRS(11, r1)
REST_GPRS(7, 12, r1)
mtlr r3
mtctr r4
@ -354,7 +379,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
*/
stw r0,8(r1)
REST_4GPRS(2, r1)
REST_GPRS(2, 5, r1)
bne- cr1,1f /* emulate stack store */
mtcr r6
@ -430,8 +455,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
bne interrupt_return; \
lwz r0,GPR0(r1); \
lwz r2,GPR2(r1); \
REST_4GPRS(3, r1); \
REST_2GPRS(7, r1); \
REST_GPRS(3, 8, r1); \
lwz r10,_XER(r1); \
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \


@ -180,7 +180,7 @@ _GLOBAL(_switch)
#endif
ld r8,KSP(r4) /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
@ -232,7 +232,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
slbmte r7,r0
isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_64S_HASH_MMU */
clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE


@ -37,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);


@ -198,8 +198,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
stdcx. r0,0,r1 /* to clear the reservation */
REST_4GPRS(2, r1)
REST_4GPRS(6, r1)
REST_GPRS(2, 9, r1)
ld r10,_CTR(r1)
ld r11,_XER(r1)
@ -375,9 +374,7 @@ ret_from_mc_except:
exc_##n##_common: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
std r9,GPR9(r1); /* save r9 in stackframe */ \
SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
@ -1061,9 +1058,7 @@ bad_stack_book3e:
std r11,_ESR(r1)
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
std r9,GPR9(r1); /* save r9 in stackframe */ \
SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \
ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \
ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \
mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
@ -1077,8 +1072,7 @@ bad_stack_book3e:
std r10,_LINK(r1)
std r11,_CTR(r1)
std r12,_XER(r1)
SAVE_10GPRS(14,r1)
SAVE_8GPRS(24,r1)
SAVE_GPRS(14, 31, r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
addi r11,r1,INT_FRAME_SIZE


@ -48,7 +48,7 @@
.balign IFETCH_ALIGN_BYTES; \
.global name; \
_ASM_NOKPROBE_SYMBOL(name); \
DEFINE_FIXED_SYMBOL(name); \
DEFINE_FIXED_SYMBOL(name, text); \
name:
#define TRAMP_REAL_BEGIN(name) \
@ -76,31 +76,18 @@ name:
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
#define __LOAD_HANDLER(reg, label) \
#define __LOAD_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(label))@l
ori reg,reg,(ABS_ADDR(label, section))@l
/*
* Branches from unrelocated code (e.g., interrupts) to labels outside
* head-y require >64K offsets.
*/
#define __LOAD_FAR_HANDLER(reg, label) \
#define __LOAD_FAR_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
ori reg,reg,(ABS_ADDR(label))@l; \
addis reg,reg,(ABS_ADDR(label))@h
/*
* Branch to label using its 0xC000 address. This results in instruction
* address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
* on using mtmsr rather than rfid.
*
* This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
* load KBASE for a slight optimisation.
*/
#define BRANCH_TO_C000(reg, label) \
__LOAD_FAR_HANDLER(reg, label); \
mtctr reg; \
bctr
ori reg,reg,(ABS_ADDR(label, section))@l; \
addis reg,reg,(ABS_ADDR(label, section))@h
/*
* Interrupt code generation macros
@ -111,9 +98,10 @@ name:
#define IAREA .L_IAREA_\name\() /* PACA save area */
#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */
#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
#define ICFAR .L_ICFAR_\name\() /* Uses CFAR */
#define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
@ -151,15 +139,18 @@ do_define_int n
.ifndef IISIDE
IISIDE=0
.endif
.ifndef ICFAR
ICFAR=1
.endif
.ifndef ICFAR_IF_HVMODE
ICFAR_IF_HVMODE=0
.endif
.ifndef IDAR
IDAR=0
.endif
.ifndef IDSISR
IDSISR=0
.endif
.ifndef ISET_RI
ISET_RI=1
.endif
.ifndef IBRANCH_TO_COMMON
IBRANCH_TO_COMMON=1
.endif
@ -291,9 +282,21 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
std r10,IAREA+EX_R10(r13) /* save r10 - r12 */
.if ICFAR
BEGIN_FTR_SECTION
mfspr r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(69)
mfspr r10,SPRN_CFAR
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(69)
li r10,0
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.endif
.if \ool
.if !\virt
b tramp_real_\name
@ -309,9 +312,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
std r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
std r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.endif
INTERRUPT_TO_KERNEL
mfctr r10
std r10,IAREA+EX_CTR(r13)
@ -376,7 +381,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
* This switches to virtual mode and sets MSR[RI].
*/
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
.if IKVM_REAL
KVMTEST \name kvm_interrupt
@ -399,7 +404,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.endif
.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
.if IKVM_VIRT
KVMTEST \name kvm_interrupt
@ -413,7 +418,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
* want to run in real mode.
*/
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
.if IKVM_REAL
KVMTEST \name kvm_interrupt
@ -512,11 +517,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
stb r10,PACASRR_VALID(r13)
.endif
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
.endif
.if ISTACK
.if IKUAP
kuap_save_amr_and_lock r9, r10, cr1, cr0
@ -568,14 +568,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.endif
BEGIN_FTR_SECTION
.if ICFAR || ICFAR_IF_HVMODE
ld r10,IAREA+EX_CFAR(r13)
.else
li r10,0
.endif
std r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
std r2,GPR2(r1) /* save r2 in stackframe */
SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */
SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */
SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */
mflr r9 /* Get LR, later save to stack */
ld r2,PACATOC(r13) /* get kernel TOC into r2 */
std r9,_LINK(r1)
@ -693,8 +696,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
mtlr r9
ld r9,_CCR(r1)
mtcr r9
REST_8GPRS(2, r1)
REST_4GPRS(10, r1)
REST_GPRS(2, 13, r1)
REST_GPR(0, r1)
/* restore original r1. */
ld r1,GPR1(r1)
@ -850,12 +852,12 @@ SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
mtctr r10
bctr
TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
__LOAD_HANDLER(r10, system_call_vectored_sigill)
__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
mtctr r10
bctr
#endif
@ -902,11 +904,6 @@ INT_DEFINE_BEGIN(system_reset)
IVEC=0x100
IAREA=PACA_EXNMI
IVIRT=0 /* no virt entry point */
/*
* MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
* being used, so a nested NMI exception would corrupt it.
*/
ISET_RI=0
ISTACK=0
IKVM_REAL=1
INT_DEFINE_END(system_reset)
@ -964,7 +961,9 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
/* We are waking up from idle, so may clobber any volatile register */
cmpwi cr1,r5,2
bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
mtctr r12
bctr
#endif
#ifdef CONFIG_PPC_PSERIES
@ -979,16 +978,14 @@ TRAMP_REAL_BEGIN(system_reset_fwnmi)
EXC_COMMON_BEGIN(system_reset_common)
__GEN_COMMON_ENTRY system_reset
/*
* Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
* to recover, but nested NMI will notice in_nmi and not recover
* because of the use of the NMI stack. in_nmi reentrancy is tested in
* system_reset_exception.
* Increment paca->in_nmi. When the interrupt entry wrapper later
* enables MSR_RI, then SLB or MCE will be able to recover, but a nested
* NMI will notice in_nmi and not recover because of the use of the NMI
* stack. in_nmi reentrancy is tested in system_reset_exception.
*/
lhz r10,PACA_IN_NMI(r13)
addi r10,r10,1
sth r10,PACA_IN_NMI(r13)
li r10,MSR_RI
mtmsrd r10,1
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
@ -1062,12 +1059,6 @@ INT_DEFINE_BEGIN(machine_check_early)
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
IREALMODE_COMMON=1
/*
* MSR_RI is not enabled, because PACA_EXMC is being used, so a
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
ISET_RI=0
ISTACK=0
IDAR=1
IDSISR=1
@ -1078,7 +1069,6 @@ INT_DEFINE_BEGIN(machine_check)
IVEC=0x200
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
ISET_RI=0
IDAR=1
IDSISR=1
IKVM_REAL=1
@ -1148,9 +1138,6 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
@ -1238,10 +1225,6 @@ EXC_COMMON_BEGIN(machine_check_common)
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
/* Enable MSR_RI when finished with PACA_EXMC */
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception_async
b interrupt_return_srr
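The deleted li r10,MSR_RI / mtmsrd r10,1 pairs above reflect the comment earlier in this file: MSR[RI] is now enabled from the C interrupt entry wrapper rather than by each asm handler. A rough sketch of what that single step does, with an assumed helper name (the real code lives in the powerpc interrupt entry wrappers):

/*
 * Illustrative only: MSR[RI] marks the exception state as recoverable.
 * MSR_RI_BIT and the helper name are assumptions for this sketch.
 */
#define MSR_RI_BIT	0x2UL	/* MSR[RI], Recoverable Interrupt */

static inline void enable_msr_ri(void)
{
	unsigned long bits = MSR_RI_BIT;

	/* mtmsrd with L=1 updates only the MSR[EE] and MSR[RI] fields */
	asm volatile("mtmsrd %0,1" : : "r" (bits) : "memory");
}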
@ -1369,11 +1352,15 @@ EXC_COMMON_BEGIN(data_access_common)
addi r3,r1,STACK_FRAME_OVERHEAD
andis. r0,r4,DSISR_DABRMATCH@h
bne- 1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
bl do_page_fault
#endif
b interrupt_return_srr
1: bl do_break
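With CONFIG_PPC_64S_HASH_MMU now optional, the data_access_common hunk above only emits the hash-fault path when hash support is built in; otherwise the radix page-fault path is taken unconditionally. A standalone C sketch of that dispatch follows; the scaffolding and the radix_enabled() stub are illustrative, while the handler names mirror the branches above.

/* Illustrative dispatch only, not the kernel's handler. */
#include <stdio.h>
#include <stdbool.h>

#define CONFIG_PPC_64S_HASH_MMU 1	/* set to 0 to model a radix-only build */

static bool radix_enabled(void) { return true; }	/* stand-in for the MMU feature check */
static void do_hash_fault(void) { puts("hash MMU: HPT fault path"); }
static void do_page_fault(void) { puts("generic page fault path"); }

static void data_access_dispatch(void)
{
#if CONFIG_PPC_64S_HASH_MMU
	if (radix_enabled())
		do_page_fault();	/* radix: ordinary page fault */
	else
		do_hash_fault();	/* hash MMU: try the HPT first */
#else
	do_page_fault();		/* hash support compiled out */
#endif
}

int main(void)
{
	data_access_dispatch();
	return 0;
}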
@ -1416,6 +1403,7 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
@ -1428,9 +1416,12 @@ MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
li r3,-EFAULT
#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
bl do_bad_segment_interrupt
b interrupt_return_srr
@ -1462,11 +1453,15 @@ EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
bl do_page_fault
#endif
b interrupt_return_srr
@ -1496,6 +1491,7 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
@ -1508,9 +1504,12 @@ MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
li r3,-EFAULT
#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
bl do_bad_segment_interrupt
b interrupt_return_srr
@ -1536,6 +1535,12 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
*
* CFAR is not required because this is an asynchronous interrupt that in
* general won't have much bearing on the state of the CPU, with the possible
* exception of crash/debug IPIs, but those are generally moving to use SRESET
* IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
* it may be exiting the guest and need CFAR to be saved.
*/
INT_DEFINE_BEGIN(hardware_interrupt)
IVEC=0x500
@ -1543,6 +1548,10 @@ INT_DEFINE_BEGIN(hardware_interrupt)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
@ -1764,6 +1773,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
* If PPC_WATCHDOG is configured, the soft masked handler will actually set
* things back up to run soft_nmi_interrupt as a regular interrupt handler
* on the emergency stack.
*
* CFAR is not required because this is asynchronous (see hardware_interrupt).
* A watchdog interrupt may like to have CFAR, but usually the interesting
* branch is long gone by that point (e.g., infinite loop).
*/
INT_DEFINE_BEGIN(decrementer)
IVEC=0x900
@ -1771,6 +1784,7 @@ INT_DEFINE_BEGIN(decrementer)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(decrementer)
EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
@ -1846,6 +1860,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
* If soft masked, the masked handler will note the pending interrupt for
* replay, leaving MSR[EE] enabled in the interrupted context because the
* doorbells are edge triggered.
*
* CFAR is not required, similarly to hardware_interrupt.
*/
INT_DEFINE_BEGIN(doorbell_super)
IVEC=0xa00
@ -1853,6 +1869,7 @@ INT_DEFINE_BEGIN(doorbell_super)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(doorbell_super)
EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
@ -1904,6 +1921,7 @@ INT_DEFINE_BEGIN(system_call)
IVEC=0xc00
IKVM_REAL=1
IKVM_VIRT=1
ICFAR=0
INT_DEFINE_END(system_call)
.macro SYSTEM_CALL virt
@ -1942,12 +1960,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
HMT_MEDIUM
.if ! \virt
__LOAD_HANDLER(r10, system_call_common_real)
__LOAD_HANDLER(r10, system_call_common_real, real_vectors)
mtctr r10
bctr
.else
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common)
__LOAD_HANDLER(r10, system_call_common, virt_vectors)
mtctr r10
bctr
#else
@ -2001,7 +2019,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
__LOAD_FAR_HANDLER(r10, kvmppc_hcall)
__LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
mtctr r10
bctr
#else
@ -2202,6 +2220,11 @@ EXC_COMMON_BEGIN(hmi_exception_common)
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsnd doorbell.
* Similar to the 0xa00 doorbell but for host rather than guest.
*
* CFAR is not required (similar to doorbell_interrupt), unless KVM HV
* is enabled, in which case it may be a guest exit. Most PowerNV kernels
* include KVM support so it would be nice if this could be dynamically
* patched out if KVM was not currently running any guests.
*/
INT_DEFINE_BEGIN(h_doorbell)
IVEC=0xe80
@ -2209,6 +2232,9 @@ INT_DEFINE_BEGIN(h_doorbell)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)
EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
@ -2232,6 +2258,9 @@ EXC_COMMON_BEGIN(h_doorbell_common)
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
* This is an asynchronous interrupt in response to an "external exception".
* Similar to 0x500 but for host only.
*
* Like h_doorbell, CFAR is only required for KVM HV because this can be
* a guest exit.
*/
INT_DEFINE_BEGIN(h_virt_irq)
IVEC=0xea0
@ -2239,6 +2268,9 @@ INT_DEFINE_BEGIN(h_virt_irq)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)
EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
@ -2275,6 +2307,8 @@ EXC_VIRT_NONE(0x4ee0, 0x20)
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
*
* CFAR is not used by perf interrupts so not required.
*/
INT_DEFINE_BEGIN(performance_monitor)
IVEC=0xf00
@ -2282,6 +2316,7 @@ INT_DEFINE_BEGIN(performance_monitor)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(performance_monitor)
EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
@ -2706,6 +2741,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
INT_DEFINE_BEGIN(soft_nmi)
IVEC=0x900
ISTACK=0
ICFAR=0
INT_DEFINE_END(soft_nmi)
/*
@ -3025,7 +3061,7 @@ USE_FIXED_SECTION(virt_trampolines)
.align 7
.globl __end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)
DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)
CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);


@ -251,7 +251,7 @@ bool is_fadump_reserved_mem_contiguous(void)
}
/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
static void __init fadump_show_config(void)
{
int i;
@ -353,7 +353,7 @@ static __init u64 fadump_calculate_reserve_size(void)
* Calculate the total memory size required to be reserved for
* firmware-assisted dump registration.
*/
static unsigned long get_fadump_area_size(void)
static unsigned long __init get_fadump_area_size(void)
{
unsigned long size = 0;
@ -462,7 +462,7 @@ static int __init fadump_get_boot_mem_regions(void)
* with the given memory range.
* False, otherwise.
*/
static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
static bool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
bool ret = false;
int i;
@ -737,7 +737,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
fw_dump.ops->fadump_trigger(fdh, str);
}
u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
struct elf_prstatus prstatus;
@ -752,7 +752,7 @@ u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
return buf;
}
void fadump_update_elfcore_header(char *bufp)
void __init fadump_update_elfcore_header(char *bufp)
{
struct elf_phdr *phdr;
@ -770,7 +770,7 @@ void fadump_update_elfcore_header(char *bufp)
return;
}
static void *fadump_alloc_buffer(unsigned long size)
static void *__init fadump_alloc_buffer(unsigned long size)
{
unsigned long count, i;
struct page *page;
@ -792,7 +792,7 @@ static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}
s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
{
/* Allocate buffer to hold cpu crash notes. */
fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
@ -1447,7 +1447,7 @@ static ssize_t release_mem_store(struct kobject *kobj,
}
/* Release the reserved memory and disable the FADump */
static void unregister_fadump(void)
static void __init unregister_fadump(void)
{
fadump_cleanup();
fadump_release_memory(fw_dump.reserve_dump_area_start,
@ -1547,7 +1547,7 @@ ATTRIBUTE_GROUPS(fadump);
DEFINE_SHOW_ATTRIBUTE(fadump_region);
static void fadump_init_files(void)
static void __init fadump_init_files(void)
{
int rc = 0;
@ -1641,6 +1641,14 @@ int __init setup_fadump(void)
else if (fw_dump.reserve_dump_area_size)
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
/*
* In case of panic, fadump is triggered via ppc_panic_event()
* panic notifier. Setting crash_kexec_post_notifiers to 'true'
* lets panic() function take crash friendly path before panic
* notifiers are invoked.
*/
crash_kexec_post_notifiers = true;
return 1;
}
subsys_initcall(setup_fadump);
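The comment above notes that fadump is triggered from ppc_panic_event() on the panic notifier chain, with crash_kexec_post_notifiers set so those notifiers run before the crash/kexec path. A small, self-contained sketch of that notifier mechanism follows; the demo_* names are illustrative, and only the notifier API itself (notifier_block, panic_notifier_list) is the standard one.

/*
 * Illustrative module showing how a panic notifier like
 * ppc_panic_event() is hooked up; this is not the fadump code.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>

static int demo_panic_event(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	/* data is the panic message string */
	pr_emerg("demo panic notifier: %s\n", (char *)data);
	return NOTIFY_DONE;
}

static struct notifier_block demo_panic_nb = {
	.notifier_call = demo_panic_event,
};

static int __init demo_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &demo_panic_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &demo_panic_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");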


@ -81,7 +81,12 @@ EXPORT_SYMBOL(store_fp_state)
*/
_GLOBAL(load_up_fpu)
mfmsr r5
#ifdef CONFIG_PPC_BOOK3S_64
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
ori r5,r5,MSR_FP|MSR_RI
#else
ori r5,r5,MSR_FP
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h

Some files were not shown because too many files were changed in this diff.