s390 updates for the 5.2 merge window

  - Support for kernel address space layout randomization
 
  - Add support for kernel image signature verification
 
  - Convert s390 to the generic get_user_pages_fast code
 
  - Convert s390 to the stack unwind API analogous to x86
 
  - Add support for CPU directed interrupts for PCI devices
 
  - Provide support for MIO instructions to the PCI base layer; this
    will allow the use of direct PCI mappings in user space code
 
  - Add the basic KVM guest ultravisor interface for protected VMs
 
  - Add AT_HWCAP bits for several new hardware capabilities
 
  - Update the CPU measurement facility counter definitions to SVN 6
 
  - Arnd's cleanup patches for his quest to get LLVM compiles working
 
  - A vfio-ccw update with bug fixes and support for halt and clear
 
  - Improvements for the hardware TRNG code
 
  - Another round of cleanup for the QDIO layer
 
  - Numerous cleanups and bug fixes
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQEcBAABCAAGBQJc0CCEAAoJEDjwexyKj9rgjmkH/A3e2drvuP/hSF3xfCKTQFdx
 /PoLHQVCqENB3HU3FA/ljoXuG6jMgwj61looqlxBNumXFpIfTg0E1JC5S4wRGJ+K
 cOVhIKV53gcuZkRcCJQp0WMnGzpk1Daf7iYXYmAl+7e+mREUPxOuJ0Ei6vXvRGZS
 8cQrUCGrtPgkAeLlndypHI2M2TDDGJIMczOGbOZau8+8Lo7Wq9zt5y0h/v0ew37g
 ogA0eGh6koU1435dt2pclZRiZ1XOcar3Uin9ioT+RnSgJ4pr1Pza/F6IGO0RdQa+
 rva990lqGFp5r9lE4rMCwK9LWb/rfHdVPd35t9XPwphnQ/ORoWUwLk3uc5XOHow=
 =dbuy
 -----END PGP SIGNATURE-----

Merge tag 's390-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

 - Support for kernel address space layout randomization

 - Add support for kernel image signature verification

 - Convert s390 to the generic get_user_pages_fast code

 - Convert s390 to the stack unwind API analogous to x86 (see the
   sketch after this list)

 - Add support for CPU directed interrupts for PCI devices

 - Provide support for MIO instructions to the PCI base layer; this will
   allow the use of direct PCI mappings in user space code

 - Add the basic KVM guest ultravisor interface for protected VMs

 - Add AT_HWCAP bits for several new hardware capabilities

 - Update the CPU measurement facility counter definitions to SVN 6

 - Arnd's cleanup patches for his quest to get LLVM compiles working

 - A vfio-ccw update with bug fixes and support for halt and clear

 - Improvements for the hardware TRNG code

 - Another round of cleanup for the QDIO layer

 - Numerous cleanups and bug fixes
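
For readers unfamiliar with the new unwind API, here is a minimal caller
sketch; it assumes the interface mirrors its x86 counterpart, with the
iterator and accessor names taken from the asm/unwind.h added by this
merge:

    #include <linux/sched.h>
    #include <asm/unwind.h>

    static void show_backtrace_example(struct task_struct *task)
    {
            struct unwind_state state;

            /* walk all frames of the given task's kernel stack */
            unwind_for_each_frame(&state, task, NULL, 0)
                    printk("%pS\n",
                           (void *)unwind_get_return_address(&state));
    }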

* tag 's390-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (98 commits)
  s390/vdso: drop unnecessary cc-ldoption
  s390: fix clang -Wpointer-sign warnings in boot code
  s390: drop CONFIG_VIRT_TO_BUS
  s390: boot, purgatory: pass $(CLANG_FLAGS) where needed
  s390: only build for new CPUs with clang
  s390: simplify disabled_wait
  s390/ftrace: use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
  s390/unwind: introduce stack unwind API
  s390/opcodes: add missing instructions to the disassembler
  s390/bug: add entry size to the __bug_table section
  s390: use proper expoline sections for .dma code
  s390/nospec: rename assembler generated expoline thunks
  s390: add missing ENDPROC statements to assembler functions
  locking/lockdep: check for freed initmem in static_obj()
  s390/kernel: add support for kernel address space layout randomization (KASLR)
  s390/kernel: introduce .dma sections
  s390/sclp: do not use static sccbs
  s390/kprobes: use static buffer for insn_page
  s390/kernel: convert SYSCALL and PGM_CHECK handlers to .quad
  s390/kernel: build a relocatable kernel
  ...
Linus Torvalds committed 2019-05-06 16:42:54 -07:00
Parent: ccbc2e5ed1 ce968f6012
Commit: 14be4c61c2
159 changed files: 4909 additions and 1940 deletions


@@ -3429,6 +3429,8 @@
bridges without forcing it upstream. Note:
this removes isolation between devices and
may put more devices in an IOMMU group.
force_floating [S390] Force usage of floating interrupts.
nomio [S390] Do not use MIO instructions.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
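
As an illustration (not part of the patch): these new parameters are
passed on the kernel command line like any other, for example via a
hypothetical zipl.conf boot section:

    [linux]
    image=/boot/image
    ramdisk=/boot/initrd
    parameters="root=/dev/dasda1 nomio force_floating"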


@@ -143,6 +143,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_GUP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -184,7 +185,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME
select VIRT_TO_BUS
select HAVE_NMI
@@ -236,6 +236,7 @@ choice
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
depends on !CC_IS_CLANG
select HAVE_MARCH_Z900_FEATURES
help
Select this to enable optimizations for model z800/z900 (2064 and
@@ -244,6 +245,7 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
depends on !CC_IS_CLANG
select HAVE_MARCH_Z990_FEATURES
help
Select this to enable optimizations for model z890/z990 (2084 and
@@ -252,6 +254,7 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
depends on !CC_IS_CLANG
select HAVE_MARCH_Z9_109_FEATURES
help
Select this to enable optimizations for IBM System z9 (2094 and
@@ -343,12 +346,15 @@ config TUNE_DEFAULT
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
depends on !CC_IS_CLANG
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
depends on !CC_IS_CLANG
config TUNE_Z9_109
bool "IBM System z9"
depends on !CC_IS_CLANG
config TUNE_Z10
bool "IBM System z10"
@@ -384,6 +390,9 @@ config COMPAT
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
config COMPAT_VDSO
def_bool COMPAT && !CC_IS_CLANG
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
@@ -545,6 +554,17 @@ config ARCH_HAS_KEXEC_PURGATORY
def_bool y
depends on KEXEC_FILE
config KEXEC_VERIFY_SIG
bool "Verify kernel signature during kexec_file_load() syscall"
depends on KEXEC_FILE && SYSTEM_DATA_VERIFICATION
help
This option makes kernel signature verification mandatory for
the kexec_file_load() syscall.
In addition to that option, you need to enable signature
verification for the corresponding kernel image type being
loaded in order for this to work.
config ARCH_RANDOM
def_bool y
prompt "s390 architectural random number generation API"
@@ -605,6 +625,29 @@ config EXPOLINE_FULL
endchoice
config RELOCATABLE
bool "Build a relocatable kernel"
select MODULE_REL_CRCS if MODVERSIONS
default y
help
This builds a kernel image that retains relocation information
so it can be loaded at an arbitrary address.
The kernel is linked as a position-independent executable (PIE)
and contains dynamic relocations which are processed early in the
bootup process.
The relocations make the kernel image about 15% larger (compressed
10%), but are discarded at runtime.
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image (KASLR)"
depends on RELOCATABLE
default y
help
In support of Kernel Address Space Layout Randomization (KASLR),
this randomizes the address at which the kernel image is loaded,
as a security feature that deters exploit attempts relying on
knowledge of the location of kernel internals.
endmenu
menu "Memory setup"
@@ -833,6 +876,17 @@ config HAVE_PNETID
menu "Virtualization"
config PROTECTED_VIRTUALIZATION_GUEST
def_bool n
prompt "Protected virtualization guest support"
help
Select this option, if you want to be able to run this
kernel as a protected virtualization KVM guest.
Protected virtualization capable machines have a mini hypervisor
located at machine level (an ultravisor). With help of the
Ultravisor, KVM will be able to run "protected" VMs, special
VMs whose memory and management data are unavailable to KVM.
config PFAULT
def_bool y
prompt "Pseudo page fault support"


@@ -16,10 +16,14 @@ KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS += -fPIE
LDFLAGS_vmlinux := -pie
endif
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
@@ -111,7 +115,7 @@ endif
cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1)
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_CFLAGS += -pipe -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
export KBUILD_AFLAGS_DECOMPRESSOR


@@ -12,25 +12,35 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
#
# Use -march=z900 for als.c to be able to print an error
# Use minimum architecture for als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
ifneq ($(CC_FLAGS_MARCH),-march=z900)
ifndef CONFIG_CC_IS_CLANG
CC_FLAGS_MARCH_MINIMUM := -march=z900
else
CC_FLAGS_MARCH_MINIMUM := -march=z10
endif
ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
AFLAGS_mem.o += -march=z900
AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
CFLAGS_als.o += -march=z900
CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
targets := bzImage startup.a section_cmp.boot.data $(obj-y)
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
subdir- := compressed
OBJECTS := $(addprefix $(obj)/,$(obj-y))
@@ -48,7 +58,8 @@ define cmd_section_cmp
touch $@
endef
$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
OBJCOPYFLAGS_bzImage := --pad-to $$(readelf -s $(obj)/compressed/vmlinux | awk '/\<_end\>/ {print or(strtonum("0x"$$2),4095)+1}')
$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
$(call if_changed,objcopy)
$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE


@@ -99,7 +99,7 @@ static void facility_mismatch(void)
print_machine_type();
print_missing_facilities();
sclp_early_printk("See Principles of Operations for facility bits\n");
disabled_wait(0x8badcccc);
disabled_wait();
}
void verify_facilities(void)


@@ -9,5 +9,10 @@ void setup_boot_command_line(void);
void parse_boot_command_line(void);
void setup_memory_end(void);
void print_missing_facilities(void);
unsigned long get_random_base(unsigned long safe_addr);
extern int kaslr_enabled;
unsigned long read_ipl_report(unsigned long safe_offset);
#endif /* BOOT_BOOT_H */


@@ -17,6 +17,11 @@ struct vmlinux_info {
unsigned long bss_size; /* uncompressed image .bss size */
unsigned long bootdata_off;
unsigned long bootdata_size;
unsigned long bootdata_preserved_off;
unsigned long bootdata_preserved_size;
unsigned long dynsym_start;
unsigned long rela_dyn_start;
unsigned long rela_dyn_end;
};
extern char _vmlinux_info[];


@@ -33,7 +33,29 @@ SECTIONS
*(.data.*)
_edata = . ;
}
/*
* .dma section for code, data, ex_table that need to stay below 2 GB,
* even when the kernel is relocated above 2 GB.
*/
_sdma = .;
.dma.text : {
. = ALIGN(PAGE_SIZE);
_stext_dma = .;
*(.dma.text)
. = ALIGN(PAGE_SIZE);
_etext_dma = .;
}
. = ALIGN(16);
.dma.ex_table : {
_start_dma_ex_table = .;
KEEP(*(.dma.ex_table))
_stop_dma_ex_table = .;
}
.dma.data : { *(.dma.data) }
_edma = .;
BOOT_DATA
BOOT_DATA_PRESERVED
/*
* uncompressed image info used by the decompressor it should match


@@ -305,7 +305,7 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
lctlg %c0,%c15,0x200(%r0) # initialize control registers
lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
@@ -319,20 +319,54 @@ ENTRY(startup_kdump)
.align 8
6: .long 0x7fffffff,0xffffffff
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0xffff # cr4: instruction authorization
.quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
.quad 0 # cr9: tracing off
.quad 0 # cr10: tracing off
.quad 0 # cr11: tracing off
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad .Llinkage_stack # cr15: linkage stack operations
.section .dma.data,"aw",@progbits
.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
.align 64
.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
.previous
#include "head_kdump.S"
#
# params at 10400 (setup.h)
# Must be kept in sync with struct parmarea in setup.h
#
.org PARMAREA
.long 0,0 # IPL_DEVICE
.long 0,0 # INITRD_START
.long 0,0 # INITRD_SIZE
.long 0,0 # OLDMEM_BASE
.long 0,0 # OLDMEM_SIZE
.quad 0 # IPL_DEVICE
.quad 0 # INITRD_START
.quad 0 # INITRD_SIZE
.quad 0 # OLDMEM_BASE
.quad 0 # OLDMEM_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
.org 0x11000
.org EARLY_SCCB_OFFSET
.fill 4096
.org HEAD_END


@@ -7,16 +7,19 @@
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
struct ipl_parameter_block __bootdata(early_ipl_block);
int __bootdata(early_ipl_block_valid);
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
int kaslr_enabled __section(.data);
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long)addr;
@@ -45,13 +48,15 @@ void store_ipl_parmblock(void)
{
int rc;
rc = __diag308(DIAG308_STORE, &early_ipl_block);
uv_set_shared(__pa(&ipl_block));
rc = __diag308(DIAG308_STORE, &ipl_block);
uv_remove_shared(__pa(&ipl_block));
if (rc == DIAG308_RC_OK &&
early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
early_ipl_block_valid = 1;
ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
ipl_block_valid = 1;
}
static size_t scpdata_length(const char *buf, size_t count)
static size_t scpdata_length(const u8 *buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
@@ -68,26 +73,26 @@ static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
size_t i;
int has_lowercase;
count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
ipb->ipl_info.fcp.scp_data_len));
count = min(size - 1, scpdata_length(ipb->fcp.scp_data,
ipb->fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
if (!isascii(ipb->fcp.scp_data[i])) {
count = 0;
goto out;
}
if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
if (!has_lowercase && islower(ipb->fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
memcpy(dest, ipb->fcp.scp_data, count);
else
for (i = 0; i < count; i++)
dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
dest[i] = tolower(ipb->fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
@@ -103,14 +108,14 @@ static void append_ipl_block_parm(void)
delim = early_command_line + len; /* '\0' character position */
parm = early_command_line + len + 1; /* append right after '\0' */
switch (early_ipl_block.hdr.pbt) {
case DIAG308_IPL_TYPE_CCW:
switch (ipl_block.pb0_hdr.pbt) {
case IPL_PBT_CCW:
rc = ipl_block_get_ascii_vmparm(
parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
case DIAG308_IPL_TYPE_FCP:
case IPL_PBT_FCP:
rc = ipl_block_get_ascii_scpdata(
parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
}
if (rc) {
@@ -141,7 +146,7 @@ void setup_boot_command_line(void)
strcpy(early_command_line, strim(COMMAND_LINE));
/* append IPL PARM data to the boot command line */
if (early_ipl_block_valid)
if (!is_prot_virt_guest() && ipl_block_valid)
append_ipl_block_parm();
}
@@ -211,6 +216,7 @@ void parse_boot_command_line(void)
char *args;
int rc;
kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, &param, &val);
@@ -228,15 +234,21 @@ void parse_boot_command_line(void)
if (!strcmp(param, "facilities"))
modify_fac_list(val);
if (!strcmp(param, "nokaslr"))
kaslr_enabled = 0;
}
}
void setup_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
if (!OLDMEM_BASE && early_ipl_block_valid &&
early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
if (OLDMEM_BASE) {
kaslr_enabled = 0;
} else if (ipl_block_valid &&
ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
memory_end_set = 1;
}

arch/s390/boot/ipl_report.c (new file, 165 lines)

@@ -0,0 +1,165 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/ctype.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <uapi/asm/ipl.h>
#include "boot.h"
int __bootdata_preserved(ipl_secure_flag);
unsigned long __bootdata_preserved(ipl_cert_list_addr);
unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
#define for_each_rb_entry(entry, rb) \
for (entry = rb->entries; \
(void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
entry++)
static inline bool intersects(unsigned long addr0, unsigned long size0,
unsigned long addr1, unsigned long size1)
{
return addr0 + size0 > addr1 && addr1 + size1 > addr0;
}
static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
struct ipl_rb_certificates *certs,
unsigned long safe_addr)
{
struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp;
size_t size;
/*
* Find the length for the IPL report boot data
*/
early_ipl_comp_list_size = 0;
for_each_rb_entry(comp, comps)
early_ipl_comp_list_size += sizeof(*comp);
ipl_cert_list_size = 0;
for_each_rb_entry(cert, certs)
ipl_cert_list_size += sizeof(unsigned int) + cert->len;
size = ipl_cert_list_size + early_ipl_comp_list_size;
/*
* Start from safe_addr to find a free memory area large
* enough for the IPL report boot data. This area is used
* for ipl_cert_list_addr/ipl_cert_list_size and
* early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
* not overlap with any component or any certificate.
*/
repeat:
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
safe_addr = INITRD_START + INITRD_SIZE;
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;
goto repeat;
}
early_ipl_comp_list_addr = safe_addr;
ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
return safe_addr + size;
}
static void copy_components_bootdata(struct ipl_rb_components *comps)
{
struct ipl_rb_component_entry *comp, *ptr;
ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
for_each_rb_entry(comp, comps)
memcpy(ptr++, comp, sizeof(*ptr));
}
static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
{
struct ipl_rb_certificate_entry *cert;
void *ptr;
ptr = (void *) ipl_cert_list_addr;
for_each_rb_entry(cert, certs) {
*(unsigned int *) ptr = cert->len;
ptr += sizeof(unsigned int);
memcpy(ptr, (void *) cert->addr, cert->len);
ptr += cert->len;
}
}
unsigned long read_ipl_report(unsigned long safe_addr)
{
struct ipl_rb_certificates *certs;
struct ipl_rb_components *comps;
struct ipl_pl_hdr *pl_hdr;
struct ipl_rl_hdr *rl_hdr;
struct ipl_rb_hdr *rb_hdr;
unsigned long tmp;
void *rl_end;
/*
* Check if there is an IPL report by looking at the copy
* of the IPL parameter information block.
*/
if (!ipl_block_valid ||
!(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
return safe_addr;
ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
/*
* There is an IPL report, to find it load the pointer to the
* IPL parameter information block from lowcore and skip past
* the IPL parameter list, then align the address to a double
* word boundary.
*/
tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
pl_hdr = (struct ipl_pl_hdr *) tmp;
tmp = (tmp + pl_hdr->len + 7) & -8UL;
rl_hdr = (struct ipl_rl_hdr *) tmp;
/* Walk through the IPL report blocks in the IPL Report list */
certs = NULL;
comps = NULL;
rl_end = (void *) rl_hdr + rl_hdr->len;
rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
(void *) rb_hdr + rb_hdr->len <= rl_end) {
switch (rb_hdr->rbt) {
case IPL_RBT_CERTIFICATES:
certs = (struct ipl_rb_certificates *) rb_hdr;
break;
case IPL_RBT_COMPONENTS:
comps = (struct ipl_rb_components *) rb_hdr;
break;
default:
break;
}
rb_hdr = (void *) rb_hdr + rb_hdr->len;
}
/*
* With either the component list or the certificate list
* missing, the kernel will stay ignorant of secure IPL.
*/
if (!comps || !certs)
return safe_addr;
/*
* Copy component and certificate list to a safe area
* where the decompressed kernel can find them.
*/
safe_addr = find_bootdata_space(comps, certs, safe_addr);
copy_components_bootdata(comps);
copy_certificates_bootdata(certs);
return safe_addr;
}

arch/s390/boot/kaslr.c (new file, 144 lines)

@@ -0,0 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2019
*/
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG 3
struct prno_parm {
u32 res;
u32 reseed_counter;
u64 stream_bytes;
u8 V[112];
u8 C[112];
};
struct prng_parm {
u8 parm_block[32];
u32 reseed_counter;
u64 byte_counter;
};
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
return PRNG_MODE_TRNG;
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
return PRNG_MODE_SHA512;
else
return PRNG_MODE_TDES;
}
static unsigned long get_random(unsigned long limit)
{
struct prng_parm prng = {
/* initial parameter block for tdes mode, copied from libica */
.parm_block = {
0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
},
};
unsigned long seed, random;
struct prno_parm prno;
__u64 entropy[4];
int mode, i;
mode = check_prng();
seed = get_tod_clock_fast();
switch (mode) {
case PRNG_MODE_TRNG:
cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
break;
case PRNG_MODE_SHA512:
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
(u8 *) &seed, sizeof(seed));
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
sizeof(random), NULL, 0);
break;
case PRNG_MODE_TDES:
/* add entropy */
*(unsigned long *) prng.parm_block ^= seed;
for (i = 0; i < 16; i++) {
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
(char *) entropy, (char *) entropy,
sizeof(entropy));
memcpy(prng.parm_block, entropy, sizeof(entropy));
}
random = seed;
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
(u8 *) &random, sizeof(random));
break;
default:
random = 0;
}
return random % limit;
}
unsigned long get_random_base(unsigned long safe_addr)
{
unsigned long base, start, end, kernel_size;
unsigned long block_sum, offset;
int i;
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
if (safe_addr < INITRD_START + INITRD_SIZE)
safe_addr = INITRD_START + INITRD_SIZE;
}
safe_addr = ALIGN(safe_addr, THREAD_SIZE);
kernel_size = vmlinux.image_size + vmlinux.bss_size;
block_sum = 0;
for_each_mem_detect_block(i, &start, &end) {
if (memory_end_set) {
if (start >= memory_end)
break;
if (end > memory_end)
end = memory_end;
}
if (end - start < kernel_size)
continue;
block_sum += end - start - kernel_size;
}
if (!block_sum) {
sclp_early_printk("KASLR disabled: not enough memory\n");
return 0;
}
base = get_random(block_sum);
if (base == 0)
return 0;
if (base < safe_addr)
base = safe_addr;
block_sum = offset = 0;
for_each_mem_detect_block(i, &start, &end) {
if (memory_end_set) {
if (start >= memory_end)
break;
if (end > memory_end)
end = memory_end;
}
if (end - start < kernel_size)
continue;
block_sum += end - start - kernel_size;
if (base <= block_sum) {
base = start + base - offset;
base = ALIGN_DOWN(base, THREAD_SIZE);
break;
}
offset = block_sum;
}
return base;
}


@@ -0,0 +1,2 @@
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/machine_kexec_reloc.c"


@@ -1,11 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .dma section, and its location is passed
* over to the decompressed / relocated kernel via the .boot.preserved.data
* section.
*/
extern char _sdma[], _edma[];
extern char _stext_dma[], _etext_dma[];
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
struct exception_table_entry *
__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
.diag210 = _diag210_dma,
.diag26c = _diag26c_dma,
.diag14 = _diag14_dma,
.diag0c = _diag0c_dma,
.diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(".dma.data");
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void _swsusp_reset_dma(void);
unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
void error(char *x)
{
@@ -13,7 +57,7 @@ void error(char *x)
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
disabled_wait(0xdeadbeef);
disabled_wait();
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
@@ -23,19 +67,16 @@ unsigned long mem_safe_offset(void)
}
#endif
static void rescue_initrd(void)
static void rescue_initrd(unsigned long addr)
{
unsigned long min_initrd_addr;
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
min_initrd_addr = mem_safe_offset();
if (min_initrd_addr <= INITRD_START)
if (addr <= INITRD_START)
return;
memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
INITRD_START = addr;
}
static void copy_bootdata(void)
@@ -43,23 +84,81 @@ static void copy_bootdata(void)
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
error(".boot.data section size mismatch");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
error(".boot.preserved.data section size mismatch");
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
static void handle_relocs(unsigned long offset)
{
Elf64_Rela *rela_start, *rela_end, *rela;
int r_type, r_sym, rc;
Elf64_Addr loc, val;
Elf64_Sym *dynsym;
rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
for (rela = rela_start; rela < rela_end; rela++) {
loc = rela->r_offset + offset;
val = rela->r_addend + offset;
r_sym = ELF64_R_SYM(rela->r_info);
if (r_sym)
val += dynsym[r_sym].st_value;
r_type = ELF64_R_TYPE(rela->r_info);
rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
if (rc)
error("Unknown relocation type");
}
}
void startup_kernel(void)
{
unsigned long random_lma;
unsigned long safe_addr;
void *img;
rescue_initrd();
sclp_early_read_info();
store_ipl_parmblock();
safe_addr = mem_safe_offset();
safe_addr = read_ipl_report(safe_addr);
uv_query_info();
rescue_initrd(safe_addr);
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
setup_memory_end();
detect_memory();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
random_lma = get_random_base(safe_addr);
if (random_lma) {
__kaslr_offset = random_lma - vmlinux.default_lma;
img = (void *)vmlinux.default_lma;
vmlinux.default_lma += __kaslr_offset;
vmlinux.entry += __kaslr_offset;
vmlinux.bootdata_off += __kaslr_offset;
vmlinux.bootdata_preserved_off += __kaslr_offset;
vmlinux.rela_dyn_start += __kaslr_offset;
vmlinux.rela_dyn_end += __kaslr_offset;
vmlinux.dynsym_start += __kaslr_offset;
}
}
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
}
} else if (__kaslr_offset)
memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
copy_bootdata();
if (IS_ENABLED(CONFIG_RELOCATABLE))
handle_relocs(__kaslr_offset);
if (__kaslr_offset) {
/* Clear non-relocated kernel */
if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
memset(img, 0, vmlinux.image_size);
}
vmlinux.entry();
}

arch/s390/boot/text_dma.S (new file, 184 lines)

@@ -0,0 +1,184 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Code that needs to run below 2 GB.
*
* Copyright IBM Corp. 2019
*/
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/sigp.h>
#ifdef CC_USING_EXPOLINE
.pushsection .dma.text.__s390_indirect_jump_r14,"axG"
__dma__s390_indirect_jump_r14:
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.popsection
#endif
.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,
* because they might be more than 2 GB away, and not reachable by the relative
* branch. No comdat, exrl, etc. optimizations used here, because it only
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
#ifdef CC_USING_EXPOLINE
jg __dma__s390_indirect_jump_r14
#else
br %r14
#endif
.endm
/*
* int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
*/
ENTRY(_diag14_dma)
lgr %r1,%r2
lgr %r2,%r3
lgr %r3,%r4
lhi %r5,-EIO
sam31
diag %r1,%r2,0x14
.Ldiag14_ex:
ipm %r5
srl %r5,28
.Ldiag14_fault:
sam64
lgfr %r2,%r5
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
ENDPROC(_diag14_dma)
/*
* int _diag210_dma(struct diag210 *addr)
*/
ENTRY(_diag210_dma)
lgr %r1,%r2
lhi %r2,-1
sam31
diag %r1,%r0,0x210
.Ldiag210_ex:
ipm %r2
srl %r2,28
.Ldiag210_fault:
sam64
lgfr %r2,%r2
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
ENDPROC(_diag210_dma)
/*
* int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
*/
ENTRY(_diag26c_dma)
lghi %r5,-EOPNOTSUPP
sam31
diag %r2,%r4,0x26c
.Ldiag26c_ex:
sam64
lgfr %r2,%r5
BR_EX_DMA_r14
EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
ENDPROC(_diag26c_dma)
/*
* void _diag0c_dma(struct hypfs_diag0c_entry *entry)
*/
ENTRY(_diag0c_dma)
sam31
diag %r2,%r2,0x0c
sam64
BR_EX_DMA_r14
ENDPROC(_diag0c_dma)
/*
* void _swsusp_reset_dma(void)
*/
ENTRY(_swsusp_reset_dma)
larl %r1,restart_entry
larl %r2,.Lrestart_diag308_psw
og %r1,0(%r2)
stg %r1,0(%r0)
lghi %r0,0
diag %r0,%r0,0x308
restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
BR_EX_DMA_r14
ENDPROC(_swsusp_reset_dma)
/*
* void _diag308_reset_dma(void)
*
* Calls diag 308 subcode 1 and continues execution
*/
ENTRY(_diag308_reset_dma)
larl %r4,.Lctlregs # Save control registers
stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff
larl %r4,.Lctlreg0
stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4)
larl %r4,.Lfpctl # Floating point control register
stfpc 0(%r4)
larl %r4,.Lprefix # Save prefix register
stpx 0(%r4)
larl %r4,.Lprefix_zero # Set prefix register to 0
spx 0(%r4)
larl %r4,.Lcontinue_psw # Save PSW flags
epsw %r2,%r3
stm %r2,%r3,0(%r4)
larl %r4,restart_part2 # Setup restart PSW at absolute 0
larl %r3,.Lrestart_diag308_psw
og %r4,0(%r3) # Save PSW
lghi %r3,0
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
lghi %r0,0
diag %r0,%r1,0x308
restart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register
lfpc 0(%r4)
larl %r4,.Lprefix # Restore prefix register
spx 0(%r4)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
BR_EX_DMA_r14
ENDPROC(_diag308_reset_dma)
.section .dma.data,"aw",@progbits
.align 8
.Lrestart_diag308_psw:
.long 0x00080000,0x80000000
.align 8
.Lcontinue_psw:
.quad 0,.Lcontinue
.align 8
.Lctlreg0:
.quad 0
.Lctlregs:
.rept 16
.quad 0
.endr
.Lfpctl:
.long 0
.Lprefix:
.long 0
.Lprefix_zero:
.long 0

arch/s390/boot/uv.c (new file, 24 lines)

@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/uv.h>
#include <asm/facility.h>
#include <asm/sections.h>
int __bootdata_preserved(prot_virt_guest);
void uv_query_info(void)
{
struct uv_cb_qui uvcb = {
.header.cmd = UVC_CMD_QUI,
.header.len = sizeof(uvcb)
};
if (!test_facility(158))
return;
if (uv_call(0, (uint64_t)&uvcb))
return;
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1;
}
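
A usage sketch (hypothetical caller, not from this patch;
do_hypervisor_call() is a stand-in name): once prot_virt_guest is set,
memory the hypervisor needs to access must be shared explicitly, the
way the ipl code in this series brackets its diag308 call with
uv_set_shared()/uv_remove_shared():

    static int call_hypervisor_example(void *buf)
    {
            int rc;

            uv_set_shared(__pa(buf));       /* grant the hypervisor access */
            rc = do_hypervisor_call(buf);   /* stand-in for the real call */
            uv_remove_shared(__pa(buf));    /* revoke access again */
            return rc;
    }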


@@ -64,6 +64,7 @@ CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y


@@ -65,6 +65,7 @@ CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y


@@ -207,5 +207,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
VLGVF %r2,%v2,3
BR_EX %r14
ENDPROC(crc32_be_vgfm_16)
.previous


@@ -105,13 +105,14 @@
ENTRY(crc32_le_vgfm_16)
larl %r5,.Lconstants_CRC_32_LE
j crc32_le_vgfm_generic
ENDPROC(crc32_le_vgfm_16)
ENTRY(crc32c_le_vgfm_16)
larl %r5,.Lconstants_CRC_32C_LE
j crc32_le_vgfm_generic
ENDPROC(crc32c_le_vgfm_16)
crc32_le_vgfm_generic:
ENTRY(crc32_le_vgfm_generic)
/* Load CRC-32 constants */
VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
@@ -267,5 +268,6 @@ crc32_le_vgfm_generic:
.Ldone:
VLGVF %r2,%v2,2
BR_EX %r14
ENDPROC(crc32_le_vgfm_generic)
.previous


@@ -61,6 +61,7 @@ static unsigned int prng_reseed_limit;
module_param_named(reseed_limit, prng_reseed_limit, int, 0);
MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
static bool trng_available;
/*
* Any one who considers arithmetical methods of producing random digits is,
@@ -115,46 +116,68 @@ static const u8 initial_parm_block[32] __initconst = {
/*
* generate_entropy:
* This algorithm produces 64 bytes of entropy data based on 1024
* individual stckf() invocations assuming that each stckf() value
* contributes 0.25 bits of entropy. So the caller gets 256 bit
* entropy per 64 byte or 4 bits entropy per byte.
* This function fills a given buffer with random bytes. The entropy within
* the returned random bytes is assumed to be at least 50%, meaning that
* a 64 byte buffer carries at least 64 * 8 / 2 = 256 bits of entropy.
* Within the function the entropy generation is done in chunks of 64 bytes,
* so the caller should also ask for buffer fills in multiples of 64 bytes.
* The generation of the entropy is based on the assumption that every stckf()
* invocation produces 0.5 bits of entropy. To accumulate 256 bits of entropy
* at least 512 stckf() values are needed. The entropy-relevant part of the
* stckf value is bit 51 (counting starts at the left with bit nr 0), so
* here we use the lower 4 bytes and XOR the values into 2k of buffer space.
* To be on the safe side, in case there is ever a problem with stckf(), the
* other half of the page buffer is filled with bytes from urandom via
* get_random_bytes(), so this function consumes 2k of urandom for each
* requested 64 bytes of output data. Finally the buffer page is condensed
* into a 64 byte value by hashing with a SHA512 hash.
*/
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
u8 *pg, *h, hash[64];
u8 *pg, pblock[80] = {
/* 8 x 64 bit init values */
0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08,
0xBB, 0x67, 0xAE, 0x85, 0x84, 0xCA, 0xA7, 0x3B,
0x3C, 0x6E, 0xF3, 0x72, 0xFE, 0x94, 0xF8, 0x2B,
0xA5, 0x4F, 0xF5, 0x3A, 0x5F, 0x1D, 0x36, 0xF1,
0x51, 0x0E, 0x52, 0x7F, 0xAD, 0xE6, 0x82, 0xD1,
0x9B, 0x05, 0x68, 0x8C, 0x2B, 0x3E, 0x6C, 0x1F,
0x1F, 0x83, 0xD9, 0xAB, 0xFB, 0x41, 0xBD, 0x6B,
0x5B, 0xE0, 0xCD, 0x19, 0x13, 0x7E, 0x21, 0x79,
/* 128 bit counter total message bit length */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 };
/* allocate 2 pages */
pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
/* allocate one page stckf buffer */
pg = (u8 *) __get_free_page(GFP_KERNEL);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
/* fill the ebuf in chunks of 64 byte each */
while (nbytes) {
/* fill pages with urandom bytes */
get_random_bytes(pg, 2*PAGE_SIZE);
/* exor pages with 1024 stckf values */
for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
/* fill lower 2k with urandom bytes */
get_random_bytes(pg, PAGE_SIZE / 2);
/* exor upper 2k with 512 stckf values, offset 4 bytes each */
for (n = 0; n < 512; n++) {
int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
u64 *p = (u64 *)(pg + offset);
*p ^= get_tod_clock_fast();
}
n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
if (n < sizeof(hash))
h = hash;
else
h = ebuf;
/* hash over the filled pages */
cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
/* hash over the filled page */
cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
n = (nbytes < 64) ? nbytes : 64;
memcpy(ebuf, pblock, n);
ret += n;
ebuf += n;
nbytes -= n;
}
free_pages((unsigned long)pg, 1);
memzero_explicit(pblock, sizeof(pblock));
memzero_explicit(pg, PAGE_SIZE);
free_page((unsigned long)pg);
return ret;
}
@@ -344,8 +367,8 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
int ret, datalen;
u8 seed[64 + 32 + 16];
int ret, datalen, seedlen;
u8 seed[128 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -368,16 +391,36 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
/* generate initial seed bytestring, with 256 + 128 bits entropy */
ret = generate_entropy(seed, 64 + 32);
if (ret != 64 + 32)
goto outfree;
/* followed by 16 bytes of unique nonce */
get_tod_clock_ext(seed + 64 + 32);
/* generate initial seed, we need at least 256 + 128 bits entropy. */
if (trng_available) {
/*
* Trng available, so use it. The trng works in chunks of
* 32 bytes and produces 100% entropy. So we pull 64 bytes
* which gives us 512 bits entropy.
*/
seedlen = 2 * 32;
cpacf_trng(NULL, 0, seed, seedlen);
} else {
/*
* No trng available, so use the generate_entropy() function.
* This function works in 64 byte chunks and produces
* 50% entropy. So we pull 2*64 bytes which gives us 512 bits
* of entropy.
*/
seedlen = 2 * 64;
ret = generate_entropy(seed, seedlen);
if (ret != seedlen)
goto outfree;
}
/* initial seed of the prno drng */
/* append the seed by 16 bytes of unique nonce */
get_tod_clock_ext(seed + seedlen);
seedlen += 16;
/* now initial seed of the prno drng */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_data->prnows, NULL, 0, seed, sizeof(seed));
&prng_data->prnows, NULL, 0, seed, seedlen);
memzero_explicit(seed, sizeof(seed));
/* if fips mode is enabled, generate a first block of random
bytes for the FIPS 140-2 Conditional Self Test */
@@ -405,17 +448,26 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
int ret;
int ret, seedlen;
u8 seed[64];
/* fetch 256 bits of fresh entropy */
ret = generate_entropy(seed, sizeof(seed));
if (ret != sizeof(seed))
return ret;
/* We need at least 256 bits of fresh entropy for reseeding */
if (trng_available) {
/* trng produces 256 bits entropy in 32 bytes */
seedlen = 32;
cpacf_trng(NULL, 0, seed, seedlen);
} else {
/* generate_entropy() produces 256 bits entropy in 64 bytes */
seedlen = 64;
ret = generate_entropy(seed, seedlen);
if (ret != sizeof(seed))
return ret;
}
/* do a reseed of the prno drng with this bytestring */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_data->prnows, NULL, 0, seed, sizeof(seed));
&prng_data->prnows, NULL, 0, seed, seedlen);
memzero_explicit(seed, sizeof(seed));
return 0;
}
@@ -592,6 +644,7 @@ static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
ret = -EFAULT;
break;
}
memzero_explicit(p, n);
ubuf += n;
nbytes -= n;
ret += n;
@@ -773,6 +826,10 @@ static int __init prng_init(void)
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* check if TRNG subfunction is available */
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
trng_available = true;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PRNO operations */


@@ -39,6 +39,7 @@ CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y


@@ -15,27 +15,13 @@
#define DBFS_D0C_HDR_VERSION 0
/*
* Execute diagnose 0c in 31 bit mode
*/
static void diag0c(struct hypfs_diag0c_entry *entry)
{
diag_stat_inc(DIAG_STAT_X00C);
asm volatile (
" sam31\n"
" diag %0,%0,0x0c\n"
" sam64\n"
: /* no output register */
: "a" (entry)
: "memory");
}
/*
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
*/
static void diag0c_fn(void *data)
{
diag0c(((void **) data)[smp_processor_id()]);
diag_stat_inc(DIAG_STAT_X00C);
diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
}
/*


@@ -14,7 +14,7 @@
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
void (*handler)(struct airq_struct *airq, bool floating);
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
@@ -35,13 +35,15 @@ struct airq_iv {
unsigned int *data; /* 32 bit value associated with each bit */
unsigned long bits; /* Number of bits in the vector */
unsigned long end; /* Number of highest allocated bit + 1 */
unsigned long flags; /* Allocation flags */
spinlock_t lock; /* Lock to protect alloc & free */
};
#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
#define AIRQ_IV_DATA 8 /* Allocate the data array */
#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
#define AIRQ_IV_DATA 8 /* Allocate the data array */
#define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
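
A sketch of the driver side of the changed handler prototype (handler
name and setup are made up; only the bool floating parameter comes from
this hunk):

    static void my_airq_handler(struct airq_struct *airq, bool floating)
    {
            if (floating) {
                    /* delivered as a floating interrupt (any CPU) */
            } else {
                    /* delivered as a CPU directed interrupt */
            }
    }

    static struct airq_struct my_airq = {
            .handler = my_airq_handler,
            .isc     = PCI_ISC,     /* assumed interrupt subclass */
    };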


@@ -73,7 +73,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_or(mask, addr);
__atomic64_or(mask, (long *)addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -94,7 +94,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__atomic64_and(mask, addr);
__atomic64_and(mask, (long *)addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -115,7 +115,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_xor(mask, addr);
__atomic64_xor(mask, (long *)addr);
}
static inline int
@@ -125,7 +125,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_or_barrier(mask, addr);
old = __atomic64_or_barrier(mask, (long *)addr);
return (old & mask) != 0;
}
@@ -136,7 +136,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
old = __atomic64_and_barrier(mask, addr);
old = __atomic64_and_barrier(mask, (long *)addr);
return (old & ~mask) != 0;
}
@@ -147,7 +147,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_xor_barrier(mask, addr);
old = __atomic64_xor_barrier(mask, (long *)addr);
return (old & mask) != 0;
}


@@ -5,7 +5,14 @@
#include <asm/ipl.h>
extern char early_command_line[COMMAND_LINE_SIZE];
extern struct ipl_parameter_block early_ipl_block;
extern int early_ipl_block_valid;
extern struct ipl_parameter_block ipl_block;
extern int ipl_block_valid;
extern int ipl_secure_flag;
extern unsigned long ipl_cert_list_addr;
extern unsigned long ipl_cert_list_size;
extern unsigned long early_ipl_comp_list_addr;
extern unsigned long early_ipl_comp_list_size;
#endif /* _ASM_S390_BOOT_DATA_H */


@@ -15,7 +15,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
".section __bug_table,\"aw\"\n" \
".section __bug_table,\"awM\",@progbits,%2\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
@@ -27,17 +27,17 @@
#else /* CONFIG_DEBUG_BUGVERBOSE */
#define __EMIT_BUG(x) do { \
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section __bug_table,\"aw\"\n" \
"2: .long 1b-2b\n" \
" .short %0\n" \
" .org 2b+%1\n" \
".previous\n" \
: : "i" (x), \
"i" (sizeof(struct bug_entry))); \
#define __EMIT_BUG(x) do { \
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section __bug_table,\"awM\",@progbits,%1\n" \
"2: .long 1b-2b\n" \
" .short %0\n" \
" .org 2b+%1\n" \
".previous\n" \
: : "i" (x), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */


@@ -308,4 +308,17 @@ union diag318_info {
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
struct hypfs_diag0c_entry;
struct diag_ops {
int (*diag210)(struct diag210 *addr);
int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
void (*diag0c)(struct hypfs_diag0c_entry *entry);
void (*diag308_reset)(void);
};
extern struct diag_ops diag_dma_ops;
extern struct diag210 *__diag210_tmp_dma;
#endif /* _ASM_S390_DIAG_H */


@@ -20,7 +20,7 @@ extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr)
{
if (nr-- <= 0)
return;


@@ -107,6 +107,10 @@
#define HWCAP_S390_VXRS_BCD 4096
#define HWCAP_S390_VXRS_EXT 8192
#define HWCAP_S390_GS 16384
#define HWCAP_S390_VXRS_EXT2 32768
#define HWCAP_S390_VXRS_PDE 65536
#define HWCAP_S390_SORT 131072
#define HWCAP_S390_DFLT 262144
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL


@@ -19,6 +19,11 @@ struct exception_table_entry
int insn, fixup;
};
extern struct exception_table_entry *__start_dma_ex_table;
extern struct exception_table_entry *__stop_dma_ex_table;
const struct exception_table_entry *s390_search_extables(unsigned long addr);
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;


@@ -11,9 +11,16 @@
#define MCOUNT_RETURN_FIXUP 18
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#ifndef __ASSEMBLY__
#ifdef CONFIG_CC_IS_CLANG
/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
#define ftrace_return_address(n) 0UL
#else
#define ftrace_return_address(n) __builtin_return_address(n)
#endif
void _mcount(void);
void ftrace_caller(void);


@@ -30,14 +30,8 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
}
static inline void iounmap(volatile void __iomem *addr)
{
}
void __iomem *ioremap(unsigned long offset, unsigned long size);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -57,14 +51,17 @@ static inline void ioport_unmap(void __iomem *p)
* the corresponding device and create the mapping cookie.
*/
#define pci_iomap pci_iomap
#define pci_iomap_range pci_iomap_range
#define pci_iounmap pci_iounmap
#define pci_iomap_wc pci_iomap
#define pci_iomap_wc_range pci_iomap_range
#define pci_iomap_wc pci_iomap_wc
#define pci_iomap_wc_range pci_iomap_wc_range
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
#define mmiowb() zpci_barrier()
#define __raw_readb zpci_read_u8
#define __raw_readw zpci_read_u16
#define __raw_readl zpci_read_u32


@@ -12,74 +12,36 @@
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <uapi/asm/ipl.h>
#define NSS_NAME_SIZE 8
struct ipl_parameter_block {
struct ipl_pl_hdr hdr;
union {
struct ipl_pb_hdr pb0_hdr;
struct ipl_pb0_common common;
struct ipl_pb0_fcp fcp;
struct ipl_pb0_ccw ccw;
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
};
} __packed __aligned(PAGE_SIZE);
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_fcp))
#define NSS_NAME_SIZE 8
#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_ccw))
#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_fcp))
#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp))
#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_ccw))
#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
#define IPL_MAX_SUPPORTED_VERSION (0)
struct ipl_list_hdr {
u32 len;
u8 reserved1[3];
u8 version;
u32 blk0_len;
u8 pbt;
u8 flags;
u16 reserved2;
u8 loadparm[8];
} __attribute__((packed));
#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1)
struct ipl_block_fcp {
u8 reserved1[305-1];
u8 opt;
u8 reserved2[3];
u16 reserved3;
u16 devno;
u8 reserved4[4];
u64 wwpn;
u64 lun;
u32 bootprog;
u8 reserved5[12];
u64 br_lba;
u32 scp_data_len;
u8 reserved6[260];
u8 scp_data[];
} __attribute__((packed));
#define DIAG308_VMPARM_SIZE 64
#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
u8 reserved1[84];
u16 reserved2 : 13;
u8 ssid : 3;
u16 devno;
u8 vm_flags;
u8 reserved3[3];
u32 vm_parm_len;
u8 nss_name[8];
u8 vm_parm[DIAG308_VMPARM_SIZE];
u8 reserved4[8];
} __attribute__((packed));
struct ipl_parameter_block {
struct ipl_list_hdr hdr;
union {
struct ipl_block_fcp fcp;
struct ipl_block_ccw ccw;
char raw[PAGE_SIZE - sizeof(struct ipl_list_hdr)];
} ipl_info;
} __packed __aligned(PAGE_SIZE);
#define DIAG308_VMPARM_SIZE (64)
#define DIAG308_SCPDATA_OFFSET offsetof(struct ipl_parameter_block, \
fcp.scp_data)
#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - DIAG308_SCPDATA_OFFSET)
struct save_area;
struct save_area * __init save_area_alloc(bool is_boot_cpu);
@@ -88,7 +50,6 @@ void __init save_area_add_regs(struct save_area *, void *regs);
void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void s390_reset_system(void);
extern void ipl_store_parameters(void);
extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb);
@@ -122,6 +83,33 @@ extern struct ipl_info ipl_info;
extern void setup_ipl(void);
extern void set_os_info_reipl_block(void);
struct ipl_report {
struct ipl_parameter_block *ipib;
struct list_head components;
struct list_head certificates;
size_t size;
};
struct ipl_report_component {
struct list_head list;
struct ipl_rb_component_entry entry;
};
struct ipl_report_certificate {
struct list_head list;
struct ipl_rb_certificate_entry entry;
void *key;
};
struct kexec_buf;
struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib);
void *ipl_report_finish(struct ipl_report *report);
int ipl_report_free(struct ipl_report *report);
int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
unsigned char flags, unsigned short cert);
int ipl_report_add_certificate(struct ipl_report *report, void *key,
unsigned long addr, unsigned long len);
/*
* DIAG 308 support
*/
@@ -133,32 +121,12 @@ enum diag308_subcode {
DIAG308_STORE = 6,
};
enum diag308_ipl_type {
DIAG308_IPL_TYPE_FCP = 0,
DIAG308_IPL_TYPE_CCW = 2,
};
enum diag308_opt {
DIAG308_IPL_OPT_IPL = 0x10,
DIAG308_IPL_OPT_DUMP = 0x20,
};
enum diag308_flags {
DIAG308_FLAGS_LP_VALID = 0x80,
};
enum diag308_vm_flags {
DIAG308_VM_FLAGS_NSS_VALID = 0x80,
DIAG308_VM_FLAGS_VP_VALID = 0x40,
};
enum diag308_rc {
DIAG308_RC_OK = 0x0001,
DIAG308_RC_NOCONFIG = 0x0102,
};
extern int diag308(unsigned long subcode, void *addr);
extern void diag308_reset(void);
extern void store_status(void (*fn)(void *), void *data);
extern void lgr_info_log(void);


@@ -47,7 +47,6 @@ enum interruption_class {
IRQEXT_CMC,
IRQEXT_FTP,
IRQIO_CIO,
IRQIO_QAI,
IRQIO_DAS,
IRQIO_C15,
IRQIO_C70,
@@ -55,12 +54,14 @@ enum interruption_class {
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,
IRQIO_CSC,
IRQIO_PCI,
IRQIO_MSI,
IRQIO_VIR,
IRQIO_QAI,
IRQIO_APB,
IRQIO_PCF,
IRQIO_PCD,
IRQIO_MSI,
IRQIO_VAI,
IRQIO_GAL,
NMI_NMI,


@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/setup.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
* I.e. Maximum page that is mapped directly into kernel memory,
@ -42,6 +43,9 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
/* Allow kexec_file to load a segment to 0 */
#define KEXEC_BUF_MEM_UNKNOWN -1
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
@ -51,20 +55,24 @@ struct s390_load_data {
/* Pointer to the kernel buffer. Used to register cmdline etc.. */
void *kernel_buf;
/* Load address of the kernel_buf. */
unsigned long kernel_mem;
/* Parmarea in the kernel buffer. */
struct parmarea *parm;
/* Total size of loaded segments in memory. Used as an offset. */
size_t memsz;
/* Load address of initrd. Used to register INITRD_START in kernel. */
unsigned long initrd_load_addr;
struct ipl_report *report;
};
int kexec_file_add_purgatory(struct kimage *image,
struct s390_load_data *data);
int kexec_file_add_initrd(struct kimage *image,
struct s390_load_data *data,
char *initrd, unsigned long initrd_len);
int *kexec_file_update_kernel(struct kimage *image,
struct s390_load_data *data);
int s390_verify_sig(const char *kernel, unsigned long kernel_len);
void *kexec_file_add_components(struct kimage *image,
int (*add_kernel)(struct kimage *image,
struct s390_load_data *data));
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr);
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;


@ -28,5 +28,12 @@
.long (_target) - . ; \
.previous
#define EX_TABLE_DMA(_fault, _target) \
.section .dma.ex_table, "a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#endif /* __ASSEMBLY__ */
#endif


@ -129,7 +129,7 @@ struct lowcore {
/* SMP info area */
__u32 cpu_nr; /* 0x03a0 */
__u32 softirq_pending; /* 0x03a4 */
__u32 preempt_count; /* 0x03a8 */
__s32 preempt_count; /* 0x03a8 */
__u32 spinlock_lockval; /* 0x03ac */
__u32 spinlock_index; /* 0x03b0 */
__u32 fpu_flags; /* 0x03b4 */


@ -32,23 +32,23 @@ _LC_BR_R1 = __LC_BR_R1
.endm
.macro __THUNK_PROLOG_BR r1,r2
__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
__THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_PROLOG_BC d0,r1,r2
__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
__THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BR r1,r2
jg __s390x_indirect_jump_r\r2\()use_r\r1
jg __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BC d0,r1,r2
jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
jg __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BRASL r1,r2,r3
brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2
.endm
.macro __DECODE_RR expand,reg,ruse


@ -26,6 +26,9 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
#define ZPCI_DEVFN 0 /* default device number */
#define ZPCI_NR_DMA_SPACES 1
#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
@ -83,6 +86,8 @@ enum zpci_state {
struct zpci_bar_struct {
struct resource *res; /* bus resource */
void __iomem *mio_wb;
void __iomem *mio_wt;
u32 val; /* bar start & 3 flag bits */
u16 map_idx; /* index into bar mapping array */
u8 size; /* order 2 exponent */
@ -112,6 +117,8 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
unsigned int max_msi; /* maximum number of MSI's */
unsigned int msi_first_bit;
unsigned int msi_nr_irqs;
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned long aisb; /* number of the summary bit */
@ -130,6 +137,7 @@ struct zpci_dev {
struct iommu_device iommu_dev; /* IOMMU core handle */
char res_name[16];
bool mio_capable;
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
@ -158,6 +166,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
}
extern const struct attribute_group *zpci_attr_groups[];
extern unsigned int s390_pci_force_floating __initdata;
/* -----------------------------------------------------------------------------
Prototypes
@ -219,6 +228,9 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
int __init zpci_irq_init(void);
void __init zpci_irq_exit(void);
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);


@ -43,6 +43,8 @@ struct clp_fh_list_entry {
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
#define CLP_SET_ENABLE_MIO 2
#define CLP_SET_DISABLE_MIO 3
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
@ -80,7 +82,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
u16 : 7;
u16 : 6;
u16 mio_addr_avail : 1;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
@ -96,6 +99,15 @@ struct clp_rsp_query_pci {
u32 reserved[11];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
u32 mio_valid : 6;
u32 : 26;
u32 : 32;
struct {
u64 wb;
u64 wt;
} addr[PCI_BAR_COUNT];
u32 reserved3[6];
} __packed;
/* Query PCI function group request */
@ -118,7 +130,11 @@ struct clp_rsp_query_pci_grp {
u8 refresh : 1; /* TLB refresh mode */
u16 reserved2;
u16 mui;
u64 reserved3;
u16 : 16;
u16 maxfaal;
u16 : 4;
u16 dnoi : 12;
u16 maxcpu;
u64 dasm; /* dma address space mask */
u64 msia; /* MSI address */
u64 reserved4;


@ -2,6 +2,8 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
#include <linux/jump_label.h>
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@ -38,6 +40,8 @@
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
#define ZPCI_MOD_FC_REG_INT_D 16
#define ZPCI_MOD_FC_DEREG_INT_D 17
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
@ -51,16 +55,7 @@
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
/* Function Information Block */
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
u32 : 32;
u8 fc; /* function controls */
u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
struct zpci_fib_fmt0 {
u32 : 1;
u32 isc : 3; /* Interrupt subclass */
u32 noi : 12; /* Number of interrupts */
@ -72,16 +67,90 @@ struct zpci_fib {
u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
};
struct zpci_fib_fmt1 {
u32 : 4;
u32 noi : 12;
u32 : 16;
u32 dibvo : 16;
u32 : 16;
u64 : 64;
u64 : 64;
};
/* Function Information Block */
struct zpci_fib {
u32 fmt : 8; /* format */
u32 : 24;
u32 : 32;
u8 fc; /* function controls */
u64 : 56;
u64 pba; /* PCI base address */
u64 pal; /* PCI address limit */
u64 iota; /* I/O Translation Anchor */
union {
struct zpci_fib_fmt0 fmt0;
struct zpci_fib_fmt1 fmt1;
};
u64 fmb_addr; /* Function measurement block address and key */
u32 : 32;
u32 gd;
} __packed __aligned(8);
/* directed interruption information block */
struct zpci_diib {
u32 : 1;
u32 isc : 3;
u32 : 28;
u16 : 16;
u16 nr_cpus;
u64 disb_addr;
u64 : 64;
u64 : 64;
} __packed __aligned(8);
/* cpu directed interruption information block */
struct zpci_cdiib {
u64 : 64;
u64 dibv_addr;
u64 : 64;
u64 : 64;
u64 : 64;
} __packed __aligned(8);
union zpci_sic_iib {
struct zpci_diib diib;
struct zpci_cdiib cdiib;
};
DECLARE_STATIC_KEY_FALSE(have_mio);
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
int __zpci_load(u64 *data, u64 req, u64 offset);
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len);
int __zpci_store(u64 data, u64 req, u64 offset);
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
int __zpci_store_block(const u64 *data, u64 req, u64 offset);
void zpci_barrier(void);
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
{
union zpci_sic_iib iib = {{0}};
return __zpci_set_irq_ctrl(ctl, isc, &iib);
}
#ifdef CONFIG_PCI
static inline void enable_mio_ctl(void)
{
if (static_branch_likely(&have_mio))
__ctl_set_bit(2, 5);
}
#else /* CONFIG_PCI */
static inline void enable_mio_ctl(void) {}
#endif /* CONFIG_PCI */
#endif


@ -37,12 +37,10 @@ extern struct zpci_iomap_entry *zpci_iomap_start;
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data; \
int rc; \
\
rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
rc = zpci_load(&data, addr, LENGTH); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@ -52,11 +50,9 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
const volatile void __iomem *addr) \
{ \
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
zpci_store(data, req, ZPCI_OFFSET(addr)); \
zpci_store(addr, data, LENGTH); \
}
zpci_read(8, u64)
@ -68,36 +64,38 @@ zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
static inline int zpci_write_single(volatile void __iomem *dst, const void *src,
unsigned long len)
{
u64 val;
switch (len) {
case 1:
val = (u64) *((u8 *) data);
val = (u64) *((u8 *) src);
break;
case 2:
val = (u64) *((u16 *) data);
val = (u64) *((u16 *) src);
break;
case 4:
val = (u64) *((u32 *) data);
val = (u64) *((u32 *) src);
break;
case 8:
val = (u64) *((u64 *) data);
val = (u64) *((u64 *) src);
break;
default:
val = 0; /* let FW report error */
break;
}
return zpci_store(val, req, offset);
return zpci_store(dst, val, len);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
unsigned long len)
{
u64 data;
int cc;
cc = zpci_load(&data, req, offset);
cc = zpci_load(&data, src, len);
if (cc)
goto out;
@ -119,10 +117,8 @@ out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
return zpci_store_block(data, req, offset);
}
int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len);
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
@ -140,18 +136,15 @@ static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
u64 req, offset = ZPCI_OFFSET(src);
int size, rc = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src,
(u64) dst, n, 8);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
rc = zpci_read_single(req, dst, offset, size);
rc = zpci_read_single(dst, src, size);
if (rc)
break;
offset += size;
src += size;
dst += size;
n -= size;
}
@ -161,8 +154,6 @@ static inline int zpci_memcpy_fromio(void *dst,
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
const void *src, unsigned long n)
{
struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
u64 req, offset = ZPCI_OFFSET(dst);
int size, rc = 0;
if (!src)
@ -171,16 +162,14 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst,
(u64) src, n, 128);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
if (size > 8) /* main path */
rc = zpci_write_block(req, src, offset);
rc = zpci_write_block(dst, src, size);
else
rc = zpci_write_single(req, src, offset, size);
rc = zpci_write_single(dst, src, size);
if (rc)
break;
offset += size;
src += size;
dst += size;
n -= size;
}
return rc;


@ -238,7 +238,7 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
@ -277,6 +277,7 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */
#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
@ -614,15 +615,9 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_bad(pgd_t pgd)
{
/*
* With dynamic page table levels the pgd can be a region table
* entry or a segment table entry. Check for the bit that are
* invalid for either table entry.
*/
unsigned long mask =
~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
return (pgd_val(pgd) & mask) != 0;
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
return 0;
return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}
static inline unsigned long pgd_pfn(pgd_t pgd)
@ -703,6 +698,8 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
return 1;
if (pmd_large(pmd))
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
@ -710,8 +707,12 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pud_bad(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
return pmd_bad(__pmd(pud_val(pud)));
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
if (type > _REGION_ENTRY_TYPE_R3)
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
if (pud_large(pud))
return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
@ -719,8 +720,12 @@ static inline int pud_bad(pud_t pud)
static inline int p4d_bad(p4d_t p4d)
{
if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
return pud_bad(__pud(p4d_val(p4d)));
unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
if (type > _REGION_ENTRY_TYPE_R2)
return 1;
if (type < _REGION_ENTRY_TYPE_R2)
return 0;
return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
@ -1204,42 +1209,79 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
/*
* The pgd_offset function *always* adds the index for the top-level
* region/segment table. This is done to get a sequence like the
* following to work:
* pgdp = pgd_offset(current->mm, addr);
* pgd = READ_ONCE(*pgdp);
* p4dp = p4d_offset(&pgd, addr);
* ...
* The subsequent p4d_offset, pud_offset and pmd_offset functions
* only add an index if they dereferenced the pointer.
*/
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
unsigned long rste;
unsigned int shift;
/* Get the first entry of the top level table */
rste = pgd_val(*pgd);
/* Pick up the shift from the table type of the first entry */
shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
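With 11-bit table indices the type of the top-level entry translates directly into the index shift: a region-first table (type 3) gives 3*11 + 20 = 53, region-second 42, region-third 31 and a segment table 20, i.e. the usual PGDIR/P4D/PUD/PMD index shifts.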
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
p4d_t *p4d = (p4d_t *) pgd;
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
p4d = (p4d_t *) pgd_deref(*pgd);
return p4d + p4d_index(address);
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
return (p4d_t *) pgd;
}
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
pud_t *pud = (pud_t *) p4d;
if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pud = (pud_t *) p4d_deref(*p4d);
return pud + pud_index(address);
if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
return (pud_t *) p4d_deref(*p4d) + pud_index(address);
return (pud_t *) p4d;
}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
pmd_t *pmd = (pmd_t *) pud;
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pmd = (pmd_t *) pud_deref(*pud);
return pmd + pmd_index(address);
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
return (pmd_t *) pud_deref(*pud) + pmd_index(address);
return (pmd_t *) pud;
}
static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
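Put together, the helpers implement exactly the walk described in the comment above pgd_offset_raw. A hedged sketch of a full lookup (hypothetical function, no locking and no _none/_bad checks):

	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgdp, pgd;
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;

		pgdp = pgd_offset(mm, addr);	/* always indexes the top table */
		pgd = READ_ONCE(*pgdp);
		p4dp = p4d_offset(&pgd, addr);	/* adds an index only after deref */
		pudp = pud_offset(p4dp, addr);
		pmdp = pmd_offset(pudp, addr);
		return pte_offset(pmdp, addr);
	}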
static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
{
unsigned long len, end;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start)
return false;
return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@ -1249,12 +1291,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;


@ -156,25 +156,6 @@ struct thread_struct {
typedef struct thread_struct thread_struct;
/*
* Stack layout of a C stack frame.
*/
#ifndef __PACK_STACK
struct stack_frame {
unsigned long back_chain;
unsigned long empty1[5];
unsigned long gprs[10];
unsigned int empty2[8];
};
#else
struct stack_frame {
unsigned long empty1[5];
unsigned int empty2[8];
unsigned long gprs[10];
unsigned long back_chain;
};
#endif
#define ARCH_MIN_TASKALIGN 8
#define INIT_THREAD { \
@ -206,11 +187,7 @@ struct mm_struct;
struct seq_file;
struct pt_regs;
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
@ -244,55 +221,6 @@ static __no_kasan_or_inline unsigned short stap(void)
return cpu_address;
}
#define CALL_ARGS_0() \
register unsigned long r2 asm("2")
#define CALL_ARGS_1(arg1) \
register unsigned long r2 asm("2") = (unsigned long)(arg1)
#define CALL_ARGS_2(arg1, arg2) \
CALL_ARGS_1(arg1); \
register unsigned long r3 asm("3") = (unsigned long)(arg2)
#define CALL_ARGS_3(arg1, arg2, arg3) \
CALL_ARGS_2(arg1, arg2); \
register unsigned long r4 asm("4") = (unsigned long)(arg3)
#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
CALL_ARGS_3(arg1, arg2, arg3); \
register unsigned long r4 asm("5") = (unsigned long)(arg4)
#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
#define CALL_FMT_0
#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
#define CALL_ON_STACK(fn, stack, nr, args...) \
({ \
CALL_ARGS_##nr(args); \
unsigned long prev; \
\
asm volatile( \
" la %[_prev],0(15)\n" \
" la 15,0(%[_stack])\n" \
" stg %[_prev],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: "+&d" (r2), [_prev] "=&a" (prev) \
: [_stack] "a" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \
r2; \
})
/*
* Give up the time slice of the virtual PU.
*/
@ -339,10 +267,10 @@ static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
" stg %0,%1\n"
" lpswe %2\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
: "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}
/*
@ -387,12 +315,12 @@ void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
static inline void __noreturn disabled_wait(unsigned long code)
static inline void __noreturn disabled_wait(void)
{
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
psw.addr = code;
psw.addr = _THIS_IP_;
__load_psw(psw);
while (1);
}


@ -79,6 +79,9 @@ struct sclp_info {
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
unsigned char has_sipl : 1;
unsigned char has_sipl_g2 : 1;
unsigned char has_dirq : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;


@ -2,8 +2,20 @@
#ifndef _S390_SECTIONS_H
#define _S390_SECTIONS_H
#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
#include <asm-generic/sections.h>
extern bool initmem_freed;
static inline int arch_is_kernel_initmem_freed(unsigned long addr)
{
if (!initmem_freed)
return 0;
return addr >= (unsigned long)__init_begin &&
addr < (unsigned long)__init_end;
}
/*
* .boot.data section contains variables "shared" between the decompressor and
* the decompressed kernel. The decompressor will store values in them, and
@ -16,4 +28,14 @@
*/
#define __bootdata(var) __section(.boot.data.var) var
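A hypothetical example of the pattern: a value the decompressor stores and the decompressed kernel later reads would be declared as

	unsigned long __bootdata(early_mem_limit);

The ipl code further down declares early_ipl_block this way; __bootdata_preserved below is the variant whose backing section survives after initmem is freed.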
/*
* .boot.preserved.data is similar to .boot.data, but it is not part of the
* .init section and thus will be preserved for later use in the decompressed
* kernel.
*/
#define __bootdata_preserved(var) __section(.boot.preserved.data.var) var
extern unsigned long __sdma, __edma;
extern unsigned long __stext_dma, __etext_dma;
#endif


@ -12,7 +12,10 @@
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
#define PARMAREA 0x10400
#define PARMAREA_END 0x11000
#define EARLY_SCCB_OFFSET 0x11000
#define HEAD_END 0x12000
#define EARLY_SCCB_SIZE PAGE_SIZE
/*
* Machine features detected in early.c
@ -65,6 +68,16 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
struct parmarea {
unsigned long ipl_device; /* 0x10400 */
unsigned long initrd_start; /* 0x10408 */
unsigned long initrd_size; /* 0x10410 */
unsigned long oldmem_base; /* 0x10418 */
unsigned long oldmem_size; /* 0x10420 */
char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
};
extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
@ -134,6 +147,12 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
return __kaslr_offset;
}
#else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET)


@ -0,0 +1,114 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <asm/switch_to.h>
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
STACK_TYPE_IRQ,
STACK_TYPE_NODAT,
STACK_TYPE_RESTART,
};
struct stack_info {
enum stack_type type;
unsigned long begin, end;
};
const char *stack_type_name(enum stack_type type);
int get_stack_info(unsigned long sp, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask);
static inline bool on_stack(struct stack_info *info,
unsigned long addr, size_t len)
{
if (info->type == STACK_TYPE_UNKNOWN)
return false;
if (addr + len < addr)
return false;
return addr >= info->begin && addr + len < info->end;
}
static inline unsigned long get_stack_pointer(struct task_struct *task,
struct pt_regs *regs)
{
if (regs)
return (unsigned long) kernel_stack_pointer(regs);
if (task == current)
return current_stack_pointer();
return (unsigned long) task->thread.ksp;
}
/*
* Stack layout of a C stack frame.
*/
#ifndef __PACK_STACK
struct stack_frame {
unsigned long back_chain;
unsigned long empty1[5];
unsigned long gprs[10];
unsigned int empty2[8];
};
#else
struct stack_frame {
unsigned long empty1[5];
unsigned int empty2[8];
unsigned long gprs[10];
unsigned long back_chain;
};
#endif
#define CALL_ARGS_0() \
register unsigned long r2 asm("2")
#define CALL_ARGS_1(arg1) \
register unsigned long r2 asm("2") = (unsigned long)(arg1)
#define CALL_ARGS_2(arg1, arg2) \
CALL_ARGS_1(arg1); \
register unsigned long r3 asm("3") = (unsigned long)(arg2)
#define CALL_ARGS_3(arg1, arg2, arg3) \
CALL_ARGS_2(arg1, arg2); \
register unsigned long r4 asm("4") = (unsigned long)(arg3)
#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
CALL_ARGS_3(arg1, arg2, arg3); \
register unsigned long r4 asm("5") = (unsigned long)(arg4)
#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
#define CALL_FMT_0 "=&d" (r2) :
#define CALL_FMT_1 "+&d" (r2) :
#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
#define CALL_ON_STACK(fn, stack, nr, args...) \
({ \
CALL_ARGS_##nr(args); \
unsigned long prev; \
\
asm volatile( \
" la %[_prev],0(15)\n" \
" la 15,0(%[_stack])\n" \
" stg %[_prev],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
[_stack] "a" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
r2; \
})
#endif /* _ASM_S390_STACKTRACE_H */
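CALL_ON_STACK saves the current %r15, switches to the given stack, chains the old frame through back_chain and calls fn with up to five arguments in %r2-%r6. A hedged usage sketch (do_handler and run_on_nodat_stack are hypothetical):

	static void do_handler(unsigned long a, unsigned long b);

	static void run_on_nodat_stack(unsigned long a, unsigned long b)
	{
		/* runs do_handler(a, b) on the nodat stack, restores %r15 after */
		CALL_ON_STACK(do_handler, S390_lowcore.nodat_stack, 2, a, b);
	}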


@ -14,13 +14,8 @@
#include <linux/err.h>
#include <asm/ptrace.h>
/*
* The syscall table always contains 32 bit pointers since we know that the
* address of the function to be called is (way) below 4GB. So the "int"
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
extern const unsigned int sys_call_table_emu[];
extern const unsigned long sys_call_table[];
extern const unsigned long sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)


@ -119,8 +119,8 @@
"Type aliasing is used to sanitize syscall arguments");\
asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
__S390_SYS_STUBx(x, name, __VA_ARGS__) \
asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \


@ -55,8 +55,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES


@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_UNWIND_H
#define _ASM_S390_UNWIND_H
#include <linux/sched.h>
#include <linux/ftrace.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
/*
* To use the stack unwinder it has to be initialized with unwind_start.
* There are four combinations for task and regs:
* 1) task==NULL, regs==NULL: the unwind starts for the task that is currently
* running, sp/ip picked up from the CPU registers
* 2) task==NULL, regs!=NULL: the unwind starts from the sp/ip found in
* the struct pt_regs of an interrupt frame for the current task
* 3) task!=NULL, regs==NULL: the unwind starts for an inactive task with
* the sp picked up from task->thread.ksp and the ip picked up from the
* return address stored by __switch_to
* 4) task!=NULL, regs!=NULL: the sp/ip are picked up from the interrupt
* frame 'regs' of an inactive task
* If 'first_frame' is not zero, unwind_start skips unwind frames until it
* reaches the specified stack pointer.
* The end of the unwinding is indicated with unwind_done; this can be true
* right after unwind_start, e.g. with a first_frame!=0 that cannot be found.
* unwind_next_frame skips to the next frame.
* Once the unwind is completed, unwind_error() can be used to check if there
* has been a situation where the unwinder could not correctly understand
* the task's call chain.
*/
struct unwind_state {
struct stack_info stack_info;
unsigned long stack_mask;
struct task_struct *task;
struct pt_regs *regs;
unsigned long sp, ip;
int graph_idx;
bool reliable;
bool error;
};
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long first_frame);
bool unwind_next_frame(struct unwind_state *state);
unsigned long unwind_get_return_address(struct unwind_state *state);
static inline bool unwind_done(struct unwind_state *state)
{
return state->stack_info.type == STACK_TYPE_UNKNOWN;
}
static inline bool unwind_error(struct unwind_state *state)
{
return state->error;
}
static inline void unwind_start(struct unwind_state *state,
struct task_struct *task,
struct pt_regs *regs,
unsigned long sp)
{
sp = sp ? : get_stack_pointer(task, regs);
__unwind_start(state, task, regs, sp);
}
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
{
return unwind_done(state) ? NULL : state->regs;
}
#define unwind_for_each_frame(state, task, regs, first_frame) \
for (unwind_start(state, task, regs, first_frame); \
!unwind_done(state); \
unwind_next_frame(state))
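A minimal consumer of the API, mirroring what the reworked show_stack() further down does:

	static void dump_current_stack(void)
	{
		struct unwind_state state;

		/* task==NULL, regs==NULL: unwind the currently running task */
		unwind_for_each_frame(&state, NULL, NULL, 0)
			printk(" [<%016lx>] %pSR\n", state.ip, (void *) state.ip);
		if (unwind_error(&state))
			printk("unwind error\n");
	}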
static inline void unwind_init(void) {}
static inline void unwind_module_init(struct module *mod, void *orc_ip,
size_t orc_ip_size, void *orc,
size_t orc_size) {}
#ifdef CONFIG_KASAN
/*
* This disables KASAN checking when reading a value from another task's stack,
* since the other task could be running on another CPU and could have poisoned
* the stack in the meantime.
*/
#define READ_ONCE_TASK_STACK(task, x) \
({ \
unsigned long val; \
if (task == current) \
val = READ_ONCE(x); \
else \
val = READ_ONCE_NOCHECK(x); \
val; \
})
#else
#define READ_ONCE_TASK_STACK(task, x) READ_ONCE(x)
#endif
#endif /* _ASM_S390_UNWIND_H */

arch/s390/include/asm/uv.h (new file)

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Ultravisor Interfaces
*
* Copyright IBM Corp. 2019
*
* Author(s):
* Vasily Gorbik <gor@linux.ibm.com>
* Janosch Frank <frankja@linux.ibm.com>
*/
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <asm/page.h>
#define UVC_RC_EXECUTED 0x0001
#define UVC_RC_INV_CMD 0x0002
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
#define UVC_CMD_QUI 0x0001
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
/* Bits in installed uv calls */
enum uv_cmds_inst {
BIT_UVC_CMD_QUI = 0,
BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
};
struct uv_cb_header {
u16 len;
u16 cmd; /* Command Code */
u16 rc; /* Response Code */
u16 rrc; /* Return Reason Code */
} __packed __aligned(8);
struct uv_cb_qui {
struct uv_cb_header header;
u64 reserved08;
u64 inst_calls_list[4];
u64 reserved30[15];
} __packed __aligned(8);
struct uv_cb_share {
struct uv_cb_header header;
u64 reserved08[3];
u64 paddr;
u64 reserved28;
} __packed __aligned(8);
static inline int uv_call(unsigned long r1, unsigned long r2)
{
int cc;
asm volatile(
"0: .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
" brc 3,0b\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc)
: [r1] "a" (r1), [r2] "a" (r2)
: "memory", "cc");
return cc;
}
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
static inline int is_prot_virt_guest(void)
{
return prot_virt_guest;
}
static inline int share(unsigned long addr, u16 cmd)
{
struct uv_cb_share uvcb = {
.header.cmd = cmd,
.header.len = sizeof(uvcb),
.paddr = addr
};
if (!is_prot_virt_guest())
return -ENOTSUPP;
/*
* Sharing is page-wise; if we encounter addresses that are
* not page aligned, we assume something went wrong. If
* malloc'ed structs were passed to this function, we could leak
* data to the hypervisor.
*/
BUG_ON(addr & ~PAGE_MASK);
if (!uv_call(0, (u64)&uvcb))
return 0;
return -EINVAL;
}
/*
* Guest 2 request to the Ultravisor to make a page shared with the
* hypervisor for IO.
*
* @addr: Real or absolute address of the page to be shared
*/
static inline int uv_set_shared(unsigned long addr)
{
return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}
/*
* Guest 2 request to the Ultravisor to make a page unshared.
*
* @addr: Real or absolute address of the page to be unshared
*/
static inline int uv_remove_shared(unsigned long addr)
{
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
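A hedged sketch of the intended use: share one page-aligned buffer with the hypervisor for the duration of an I/O and withdraw it afterwards (do_shared_io and buf are hypothetical; share() itself already rejects non-protected guests):

	static int do_shared_io(void *buf)
	{
		int rc;

		rc = uv_set_shared(__pa(buf));	/* buf must be page aligned */
		if (rc)
			return rc;
		/* ... perform the I/O on buf ... */
		return uv_remove_shared(__pa(buf));
	}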
void uv_query_info(void);
#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
static inline void uv_query_info(void) {}
#endif
#endif /* _ASM_S390_UV_H */


@ -18,3 +18,16 @@
*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \
__boot_data_end = .; \
}
/*
* .boot.preserved.data is similar to .boot.data, but it is not part of the
* .init section and thus will be preserved for later use in the decompressed
* kernel.
*/
#define BOOT_DATA_PRESERVED \
. = ALIGN(PAGE_SIZE); \
.boot.preserved.data : { \
__boot_data_preserved_start = .; \
*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.preserved.data*))) \
__boot_data_preserved_end = .; \
}


@ -0,0 +1,154 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_UAPI_IPL_H
#define _ASM_S390_UAPI_IPL_H
#include <linux/types.h>
/* IPL Parameter List header */
struct ipl_pl_hdr {
__u32 len;
__u8 flags;
__u8 reserved1[2];
__u8 version;
} __packed;
#define IPL_PL_FLAG_IPLPS 0x80
#define IPL_PL_FLAG_SIPL 0x40
#define IPL_PL_FLAG_IPLSR 0x20
/* IPL Parameter Block header */
struct ipl_pb_hdr {
__u32 len;
__u8 pbt;
} __packed;
/* IPL Parameter Block types */
enum ipl_pbt {
IPL_PBT_FCP = 0,
IPL_PBT_SCP_DATA = 1,
IPL_PBT_CCW = 2,
};
/* IPL Parameter Block 0 with common fields */
struct ipl_pb0_common {
__u32 len;
__u8 pbt;
__u8 flags;
__u8 reserved1[2];
__u8 loadparm[8];
__u8 reserved2[84];
} __packed;
#define IPL_PB0_FLAG_LOADPARM 0x80
/* IPL Parameter Block 0 for FCP */
struct ipl_pb0_fcp {
__u32 len;
__u8 pbt;
__u8 reserved1[3];
__u8 loadparm[8];
__u8 reserved2[304];
__u8 opt;
__u8 reserved3[3];
__u8 cssid;
__u8 reserved4[1];
__u16 devno;
__u8 reserved5[4];
__u64 wwpn;
__u64 lun;
__u32 bootprog;
__u8 reserved6[12];
__u64 br_lba;
__u32 scp_data_len;
__u8 reserved7[260];
__u8 scp_data[];
} __packed;
#define IPL_PB0_FCP_OPT_IPL 0x10
#define IPL_PB0_FCP_OPT_DUMP 0x20
/* IPL Parameter Block 0 for CCW */
struct ipl_pb0_ccw {
__u32 len;
__u8 pbt;
__u8 flags;
__u8 reserved1[2];
__u8 loadparm[8];
__u8 reserved2[84];
__u16 reserved3 : 13;
__u8 ssid : 3;
__u16 devno;
__u8 vm_flags;
__u8 reserved4[3];
__u32 vm_parm_len;
__u8 nss_name[8];
__u8 vm_parm[64];
__u8 reserved5[8];
} __packed;
#define IPL_PB0_CCW_VM_FLAG_NSS 0x80
#define IPL_PB0_CCW_VM_FLAG_VP 0x40
/* IPL Parameter Block 1 for additional SCP data */
struct ipl_pb1_scp_data {
__u32 len;
__u8 pbt;
__u8 scp_data[];
} __packed;
/* IPL Report List header */
struct ipl_rl_hdr {
__u32 len;
__u8 flags;
__u8 reserved1[2];
__u8 version;
__u8 reserved2[8];
} __packed;
/* IPL Report Block header */
struct ipl_rb_hdr {
__u32 len;
__u8 rbt;
__u8 reserved1[11];
} __packed;
/* IPL Report Block types */
enum ipl_rbt {
IPL_RBT_CERTIFICATES = 1,
IPL_RBT_COMPONENTS = 2,
};
/* IPL Report Block for the certificate list */
struct ipl_rb_certificate_entry {
__u64 addr;
__u64 len;
} __packed;
struct ipl_rb_certificates {
__u32 len;
__u8 rbt;
__u8 reserved1[11];
struct ipl_rb_certificate_entry entries[];
} __packed;
/* IPL Report Block for the component list */
struct ipl_rb_component_entry {
__u64 addr;
__u64 len;
__u8 flags;
__u8 reserved1[5];
__u16 certificate_index;
__u8 reserved2[8];
};
#define IPL_RB_COMPONENT_FLAG_SIGNED 0x80
#define IPL_RB_COMPONENT_FLAG_VERIFIED 0x40
struct ipl_rb_components {
__u32 len;
__u8 rbt;
__u8 reserved1[11];
struct ipl_rb_component_entry entries[];
} __packed;
#endif


@ -39,6 +39,7 @@ CFLAGS_smp.o := -Wno-nonnull
#
CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
#
# Pass UTS_MACHINE for user_regset definition
@ -51,7 +52,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
extra-y += head64.o vmlinux.lds
@ -77,6 +78,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
obj-$(CONFIG_IMA) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
@ -86,7 +89,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss


@ -16,6 +16,7 @@
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
int main(void)
{


@ -28,6 +28,7 @@ ENTRY(s390_base_mcck_handler)
1: la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
lpswe __LC_MCK_OLD_PSW
ENDPROC(s390_base_mcck_handler)
.section .bss
.align 8
@ -48,6 +49,7 @@ ENTRY(s390_base_ext_handler)
1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
ENDPROC(s390_base_ext_handler)
.section .bss
.align 8
@ -68,6 +70,7 @@ ENTRY(s390_base_pgm_handler)
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
ENDPROC(s390_base_pgm_handler)
.align 8
disabled_wait_psw:
@ -79,71 +82,3 @@ disabled_wait_psw:
s390_base_pgm_handler_fn:
.quad 0
.previous
#
# Calls diag 308 subcode 1 and continues execution
#
ENTRY(diag308_reset)
larl %r4,.Lctlregs # Save control registers
stctg %c0,%c15,0(%r4)
lg %r2,0(%r4) # Disable lowcore protection
nilh %r2,0xefff
larl %r4,.Lctlreg0
stg %r2,0(%r4)
lctlg %c0,%c0,0(%r4)
larl %r4,.Lfpctl # Floating point control register
stfpc 0(%r4)
larl %r4,.Lprefix # Save prefix register
stpx 0(%r4)
larl %r4,.Lprefix_zero # Set prefix register to 0
spx 0(%r4)
larl %r4,.Lcontinue_psw # Save PSW flags
epsw %r2,%r3
stm %r2,%r3,0(%r4)
larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
lghi %r3,0
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
lghi %r0,0
diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register
lfpc 0(%r4)
larl %r4,.Lprefix # Restore prefix register
spx 0(%r4)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
BR_EX %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2
.section .data..nosave,"aw",@progbits
.align 8
.Lcontinue_psw:
.quad 0,.Lcontinue
.previous
.section .bss
.align 8
.Lctlreg0:
.quad 0
.Lctlregs:
.rept 16
.quad 0
.endr
.Lfpctl:
.long 0
.Lprefix:
.long 0
.Lprefix_zero:
.long 0
.previous


@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
#include <asm/sections.h>
struct diag_stat {
unsigned int counter[NR_DIAG_STAT];
@ -49,6 +50,9 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
struct diag_ops __bootdata_preserved(diag_dma_ops);
struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
static int show_diag_stat(struct seq_file *m, void *v)
{
struct diag_stat *stat;
@ -139,30 +143,10 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
static inline int __diag14(unsigned long rx, unsigned long ry1,
unsigned long subcode)
{
register unsigned long _ry1 asm("2") = ry1;
register unsigned long _ry2 asm("3") = subcode;
int rc = 0;
asm volatile(
" sam31\n"
" diag %2,2,0x14\n"
" sam64\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+d" (_ry2)
: "d" (rx), "d" (_ry1)
: "cc");
return rc;
}
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
diag_stat_inc(DIAG_STAT_X014);
return __diag14(rx, ry1, subcode);
return diag_dma_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
@ -195,30 +179,17 @@ EXPORT_SYMBOL(diag204);
*/
int diag210(struct diag210 *addr)
{
/*
* diag 210 needs its data below the 2GB border, so we
* use a static data area to be sure
*/
static struct diag210 diag210_tmp;
static DEFINE_SPINLOCK(diag210_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
*__diag210_tmp_dma = *addr;
diag_stat_inc(DIAG_STAT_X210);
asm volatile(
" lhi %0,-1\n"
" sam31\n"
" diag %1,0,0x210\n"
"0: ipm %0\n"
" srl %0,28\n"
"1: sam64\n"
EX_TABLE(0b, 1b)
: "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
*addr = diag210_tmp;
*addr = *__diag210_tmp_dma;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
@ -243,27 +214,9 @@ EXPORT_SYMBOL(diag224);
/*
* Diagnose 26C: Access Certain System Information
*/
static inline int __diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
register unsigned long _req asm("2") = (addr_t) req;
register unsigned long _resp asm("3") = (addr_t) resp;
register unsigned long _subcode asm("4") = subcode;
register unsigned long _rc asm("5") = -EOPNOTSUPP;
asm volatile(
" sam31\n"
" diag %[rx],%[ry],0x26c\n"
"0: sam64\n"
EX_TABLE(0b,0b)
: "+d" (_rc)
: [rx] "d" (_req), "d" (_resp), [ry] "d" (_subcode)
: "cc", "memory");
return _rc;
}
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
diag_stat_inc(DIAG_STAT_X26C);
return __diag26c(req, resp, subcode);
return diag_dma_ops.diag26c(req, resp, subcode);
}
EXPORT_SYMBOL(diag26c);


@ -21,95 +21,124 @@
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
#include <asm/unwind.h>
/*
* For dump_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflowed
* - the asynchronous interrupt stack (cpu related)
* - the synchronous kernel stack (process related)
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long __no_sanitize_address
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
unsigned long low, unsigned long high)
const char *stack_type_name(enum stack_type type)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
if (func(data, sf->gprs[8], 0))
return sp;
/* Follow the backchain. */
while (1) {
low = sp;
sp = sf->back_chain;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
if (func(data, sf->gprs[8], 1))
return sp;
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
if (!user_mode(regs)) {
if (func(data, regs->psw.addr, 1))
return sp;
}
low = sp;
sp = regs->gprs[15];
switch (type) {
case STACK_TYPE_TASK:
return "task";
case STACK_TYPE_IRQ:
return "irq";
case STACK_TYPE_NODAT:
return "nodat";
case STACK_TYPE_RESTART:
return "restart";
default:
return "unknown";
}
}
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
unsigned long sp)
static inline bool in_stack(unsigned long sp, struct stack_info *info,
enum stack_type type, unsigned long low,
unsigned long high)
{
unsigned long frame_size;
if (sp < low || sp >= high)
return false;
info->type = type;
info->begin = low;
info->end = high;
return true;
}
static bool in_task_stack(unsigned long sp, struct task_struct *task,
struct stack_info *info)
{
unsigned long stack;
stack = (unsigned long) task_stack_page(task);
return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
}
static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
unsigned long frame_size, top;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
S390_lowcore.nodat_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
S390_lowcore.async_stack + frame_size - THREAD_SIZE,
S390_lowcore.async_stack + frame_size);
task = task ?: current;
__dump_trace(func, data, sp,
(unsigned long)task_stack_page(task),
(unsigned long)task_stack_page(task) + THREAD_SIZE);
top = S390_lowcore.async_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
}
EXPORT_SYMBOL_GPL(dump_trace);
static int show_address(void *data, unsigned long address, int reliable)
static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
if (reliable)
printk(" [<%016lx>] %pSR \n", address, (void *)address);
else
printk("([<%016lx>] %pSR)\n", address, (void *)address);
unsigned long frame_size, top;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
top = S390_lowcore.nodat_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
unsigned long frame_size, top;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
top = S390_lowcore.restart_stack + frame_size;
return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
}
int get_stack_info(unsigned long sp, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask)
{
if (!sp)
goto unknown;
task = task ? : current;
/* Check per-task stack */
if (in_task_stack(sp, task, info))
goto recursion_check;
if (task != current)
goto unknown;
/* Check per-cpu stacks */
if (!in_irq_stack(sp, info) &&
!in_nodat_stack(sp, info) &&
!in_restart_stack(sp, info))
goto unknown;
recursion_check:
/*
* Make sure we don't iterate through any given stack more than once.
* If it comes up a second time then there's something wrong going on:
* just break out and report an unknown stack type.
*/
if (*visit_mask & (1UL << info->type)) {
printk_deferred_once(KERN_WARNING
"WARNING: stack recursion on stack type %d\n",
info->type);
goto unknown;
}
*visit_mask |= 1UL << info->type;
return 0;
unknown:
info->type = STACK_TYPE_UNKNOWN;
return -EINVAL;
}
void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long sp = (unsigned long) stack;
struct unwind_state state;
if (!sp)
sp = task ? task->thread.ksp : current_stack_pointer();
printk("Call Trace:\n");
dump_trace(show_address, NULL, task, sp);
if (!task)
task = current;
debug_show_held_locks(task);
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
printk(state.reliable ? " [<%016lx>] %pSR \n" :
"([<%016lx>] %pSR)\n",
state.ip, (void *) state.ip);
debug_show_held_locks(task ? : current);
}
static void show_last_breaking_event(struct pt_regs *regs)


@ -30,6 +30,7 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/pci_insn.h>
#include "entry.h"
/*
@ -138,9 +139,9 @@ static void early_pgm_check_handler(void)
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
fixup = search_exception_tables(addr);
fixup = s390_search_extables(addr);
if (!fixup)
disabled_wait(0);
disabled_wait();
/* Disable low address protection before storing into lowcore. */
__ctl_store(cr0, 0, 0);
cr0_new = cr0 & ~(1UL << 28);
@ -235,6 +236,7 @@ static __init void detect_machine_facilities(void)
clock_comparator_max = -1ULL >> 1;
__ctl_set_bit(0, 53);
}
enable_mio_ctl();
}
static inline void save_vector_registers(void)
@ -296,7 +298,7 @@ static void __init check_image_bootable(void)
sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
disabled_wait(0xbadb007);
disabled_wait();
}
void __init startup_init(void)
@ -309,7 +311,6 @@ void __init startup_init(void)
setup_facility_list();
detect_machine_type();
setup_arch_string();
ipl_store_parameters();
setup_boot_command_line();
detect_diag9c();
detect_diag44();


@ -25,7 +25,7 @@ static void __init reset_tod_clock(void)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
disabled_wait(0);
disabled_wait();
memset(tod_clock_base, 0, 16);
*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;


@ -224,6 +224,7 @@ ENTRY(__bpon)
.globl __bpon
BPON
BR_EX %r14
ENDPROC(__bpon)
/*
* Scheduler resume function, called by switch_to
@ -248,6 +249,7 @@ ENTRY(__switch_to)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
BR_EX %r14
ENDPROC(__switch_to)
.L__critical_start:
@ -324,6 +326,7 @@ sie_exit:
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
@ -358,19 +361,19 @@ ENTRY(system_call)
# load address of system call table
lg %r10,__THREAD_sysc_table(%r13,%r12)
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
slag %r8,%r8,3 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
slag %r8,%r1,3
.Lsysc_nr_ok:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lgf %r9,0(%r8,%r10) # get system call add.
lg %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
BASR_EX %r14,%r9 # call sys_xxxx
@ -556,8 +559,8 @@ ENTRY(system_call)
lghi %r0,NR_syscalls
clgr %r0,%r2
jnh .Lsysc_tracenogo
sllg %r8,%r2,2
lgf %r9,0(%r8,%r10)
sllg %r8,%r2,3
lg %r9,0(%r8,%r10)
.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@ -570,6 +573,7 @@ ENTRY(system_call)
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,.Lsysc_return
jg do_syscall_trace_exit
ENDPROC(system_call)
#
# a new process exits the kernel with ret_from_fork
@ -584,10 +588,16 @@ ENTRY(ret_from_fork)
jne .Lsysc_tracenogo
# it's a kernel thread
lmg %r9,%r10,__PT_R9(%r11) # load gprs
la %r2,0(%r10)
BASR_EX %r14,%r9
j .Lsysc_tracenogo
ENDPROC(ret_from_fork)
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
BASR_EX %r14,%r9
j .Lsysc_tracenogo
ENDPROC(kernel_thread_starter)
/*
* Program check handler routine
@ -665,9 +675,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
sll %r10,2
sll %r10,3
je .Lpgm_return
lgf %r9,0(%r10,%r1) # load address of handler routine
lg %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
@ -698,6 +708,7 @@ ENTRY(pgm_check_handler)
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
ENDPROC(pgm_check_handler)
/*
* IO interrupt handler routine
@ -926,6 +937,7 @@ ENTRY(io_int_handler)
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
ENDPROC(io_int_handler)
/*
* External interrupt handler routine
@ -965,6 +977,7 @@ ENTRY(ext_int_handler)
lghi %r3,EXT_INTERRUPT
brasl %r14,do_IRQ
j .Lio_return
ENDPROC(ext_int_handler)
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
@ -989,6 +1002,7 @@ ENTRY(psw_idle)
lpswe __SF_EMPTY(%r15)
BR_EX %r14
.Lpsw_idle_end:
ENDPROC(psw_idle)
/*
* Store floating-point controls and floating-point or vector register
@ -1031,6 +1045,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_exit:
BR_EX %r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
/*
@ -1077,6 +1092,7 @@ load_fpu_regs:
.Lload_fpu_regs_exit:
BR_EX %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
.L__critical_end:
@ -1206,6 +1222,7 @@ ENTRY(mcck_int_handler)
lg %r15,__LC_NODAT_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
ENDPROC(mcck_int_handler)
#
# PSW restart interrupt handler
@ -1232,6 +1249,7 @@ ENTRY(restart_int_handler)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
ENDPROC(restart_int_handler)
.section .kprobes.text, "ax"
@ -1241,7 +1259,7 @@ ENTRY(restart_int_handler)
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
ENTRY(stack_overflow)
lg %r15,__LC_NODAT_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@ -1251,9 +1269,10 @@ stack_overflow:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
ENDPROC(stack_overflow)
#endif
cleanup_critical:
ENTRY(cleanup_critical)
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@ -1289,6 +1308,7 @@ cleanup_critical:
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
0: BR_EX %r14,%r11
ENDPROC(cleanup_critical)
.align 8
.Lcleanup_table:
@ -1512,7 +1532,7 @@ cleanup_critical:
.quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
#define SYSCALL(esame,emu) .long __s390x_ ## esame
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
.globl sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
@ -1520,7 +1540,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esame,emu) .long __s390_ ## emu
#define SYSCALL(esame,emu) .quad __s390_ ## emu
.globl sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"

@ -65,7 +65,7 @@ int setup_profiling_timer(unsigned int multiplier);
void __init time_init(void);
int pfn_is_nosave(unsigned long);
void s390_early_resume(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);
struct s390_mmap_arg_struct;
struct fadvise64_64_args;

@ -201,17 +201,18 @@ device_initcall(ftrace_plt_init);
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
unsigned long ip)
{
if (unlikely(ftrace_graph_is_dead()))
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip -= MCOUNT_INSN_SIZE;
if (!function_graph_enter(parent, ip, 0, NULL))
parent = (unsigned long) return_to_handler;
if (!function_graph_enter(ra, ip, 0, (void *) sp))
ra = (unsigned long) return_to_handler;
out:
return parent;
return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
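With the caller's stack slot passed through as the last argument of
function_graph_enter(), an unwinder can later map a return_to_handler
entry back to the real return address. A minimal sketch, assuming the
generic ftrace_graph_ret_addr() helper from <linux/ftrace.h> (generic
kernel infrastructure, not part of this patch):

	#include <linux/ftrace.h>
	#include <linux/sched.h>

	/* If 'ip' is return_to_handler, look up the address that was hooked
	 * for the frame whose return-address slot lives at 'sp'. */
	static unsigned long resolve_graph_ret(struct task_struct *task,
					       int *graph_idx,
					       unsigned long ip,
					       unsigned long sp)
	{
		return ftrace_graph_ret_addr(task, graph_idx, ip,
					     (unsigned long *)sp);
	}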

@ -26,7 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
@ -61,22 +60,6 @@ ENTRY(startup_continue)
.align 16
.LPG1:
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0xffff # cr4: instruction authorization
.quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
.quad 0 # cr9: tracing off
.quad 0 # cr10: tracing off
.quad 0 # cr11: tracing off
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@ -84,14 +67,5 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
.Llinkage_stack:
.long 0,0,0x89000000,0,0,0,0x8a000000,0
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0

@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ima.h>
#include <asm/boot_data.h>
bool arch_ima_get_secureboot(void)
{
return ipl_secure_flag;
}
const char * const *arch_get_ima_policy(void)
{
return NULL;
}
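arch_get_ima_policy() returning NULL means s390 supplies no
arch-specific IMA rules yet; only the secure-boot state is exported.
For comparison, a hedged sketch of what a wired-up policy could look
like; the rule string mirrors the x86 ima_arch.c precedent and is
illustrative only, not part of this patch:

	static const char * const sb_arch_rules[] = {
		"appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig",
		NULL
	};

	const char * const *arch_get_ima_policy(void)
	{
		/* illustrative: enforce appraisal only when secure IPLed */
		if (arch_ima_get_secureboot())
			return sb_arch_rules;
		return NULL;
	}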

@ -31,6 +31,7 @@
#include <asm/os_info.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/uv.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
@ -119,11 +120,15 @@ static char *dump_type_str(enum dump_type type)
}
}
struct ipl_parameter_block __bootdata(early_ipl_block);
int __bootdata(early_ipl_block_valid);
int __bootdata_preserved(ipl_block_valid);
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_secure_flag);
static int ipl_block_valid;
static struct ipl_parameter_block ipl_block;
unsigned long __bootdata_preserved(ipl_cert_list_addr);
unsigned long __bootdata_preserved(ipl_cert_list_size);
unsigned long __bootdata(early_ipl_comp_list_addr);
unsigned long __bootdata(early_ipl_comp_list_size);
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@ -246,11 +251,11 @@ static __init enum ipl_type get_ipl_type(void)
if (!ipl_block_valid)
return IPL_TYPE_UNKNOWN;
switch (ipl_block.hdr.pbt) {
case DIAG308_IPL_TYPE_CCW:
switch (ipl_block.pb0_hdr.pbt) {
case IPL_PBT_CCW:
return IPL_TYPE_CCW;
case DIAG308_IPL_TYPE_FCP:
if (ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
case IPL_PBT_FCP:
if (ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
@ -269,12 +274,35 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
static ssize_t ipl_secure_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%i\n", !!ipl_secure_flag);
}
static struct kobj_attribute sys_ipl_secure_attr =
__ATTR(secure, 0444, ipl_secure_show, NULL);
static ssize_t ipl_has_secure_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
if (MACHINE_IS_LPAR)
return sprintf(page, "%i\n", !!sclp.has_sipl);
else if (MACHINE_IS_VM)
return sprintf(page, "%i\n", !!sclp.has_sipl_g2);
else
return sprintf(page, "%i\n", 0);
}
static struct kobj_attribute sys_ipl_has_secure_attr =
__ATTR(has_secure, 0444, ipl_has_secure_show, NULL);
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
if (ipl_block_valid && (ipl_block.pb0_hdr.pbt == IPL_PBT_CCW))
ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
@ -287,12 +315,11 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
{
switch (ipl_info.type) {
case IPL_TYPE_CCW:
return sprintf(page, "0.%x.%04x\n", ipl_block.ipl_info.ccw.ssid,
ipl_block.ipl_info.ccw.devno);
return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
ipl_block.ccw.devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n",
ipl_block.ipl_info.fcp.devno);
return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
default:
return 0;
}
@ -316,8 +343,8 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int size = ipl_block.ipl_info.fcp.scp_data_len;
void *scp_data = &ipl_block.ipl_info.fcp.scp_data;
unsigned int size = ipl_block.fcp.scp_data_len;
void *scp_data = &ipl_block.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@ -333,13 +360,13 @@ static struct bin_attribute *ipl_fcp_bin_attrs[] = {
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
(unsigned long long)ipl_block.ipl_info.fcp.wwpn);
(unsigned long long)ipl_block.fcp.wwpn);
DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n",
(unsigned long long)ipl_block.ipl_info.fcp.lun);
(unsigned long long)ipl_block.fcp.lun);
DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
(unsigned long long)ipl_block.ipl_info.fcp.bootprog);
(unsigned long long)ipl_block.fcp.bootprog);
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
(unsigned long long)ipl_block.ipl_info.fcp.br_lba);
(unsigned long long)ipl_block.fcp.br_lba);
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
@ -365,6 +392,8 @@ static struct attribute *ipl_fcp_attrs[] = {
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_secure_attr.attr,
&sys_ipl_has_secure_attr.attr,
NULL,
};
@ -380,6 +409,8 @@ static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
&sys_ipl_secure_attr.attr,
&sys_ipl_has_secure_attr.attr,
NULL,
};
@ -387,6 +418,8 @@ static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_secure_attr.attr,
&sys_ipl_has_secure_attr.attr,
NULL,
};
@ -495,14 +528,14 @@ static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
return -EINVAL;
memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
ipb->ipl_info.ccw.vm_parm_len = ip_len;
memset(ipb->ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
ipb->ccw.vm_parm_len = ip_len;
if (ip_len > 0) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
memcpy(ipb->ccw.vm_parm, buf, ip_len);
ASCEBC(ipb->ccw.vm_parm, ip_len);
} else {
ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
ipb->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_VP;
}
return len;
@ -549,8 +582,8 @@ static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
size_t size = reipl_block_fcp->fcp.scp_data_len;
void *scp_data = reipl_block_fcp->fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@ -566,17 +599,17 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
if (off)
return -EINVAL;
memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
reipl_block_fcp->fcp.scp_data_len = scpdata_len;
return count;
}
@ -590,20 +623,20 @@ static struct bin_attribute *reipl_fcp_bin_attrs[] = {
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
reipl_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
reipl_block_fcp->ipl_info.fcp.lun);
reipl_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->ipl_info.fcp.bootprog);
reipl_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
reipl_block_fcp->ipl_info.fcp.br_lba);
reipl_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->ipl_info.fcp.devno);
reipl_block_fcp->fcp.devno);
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
memcpy(loadparm, ibp->common.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
@ -638,11 +671,11 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
return -EINVAL;
}
/* initialize loadparm with blanks */
memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
memset(ipb->common.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
memcpy(ipb->hdr.loadparm, buf, lp_len);
ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
memcpy(ipb->common.loadparm, buf, lp_len);
ASCEBC(ipb->common.loadparm, LOADPARM_LEN);
ipb->common.flags |= IPL_PB0_FLAG_LOADPARM;
return len;
}
@ -680,7 +713,7 @@ static struct attribute_group reipl_fcp_attr_group = {
};
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@ -742,7 +775,7 @@ static struct attribute_group reipl_ccw_attr_group_lpar = {
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
memcpy(dst, ipb->ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
@ -770,16 +803,14 @@ static ssize_t reipl_nss_name_store(struct kobject *kobj,
if (nss_len > NSS_NAME_SIZE)
return -EINVAL;
memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
memset(reipl_block_nss->ccw.nss_name, 0x40, NSS_NAME_SIZE);
if (nss_len > 0) {
reipl_block_nss->ipl_info.ccw.vm_flags |=
DIAG308_VM_FLAGS_NSS_VALID;
memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
reipl_block_nss->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_NSS;
memcpy(reipl_block_nss->ccw.nss_name, buf, nss_len);
ASCEBC(reipl_block_nss->ccw.nss_name, nss_len);
EBC_TOUPPER(reipl_block_nss->ccw.nss_name, nss_len);
} else {
reipl_block_nss->ipl_info.ccw.vm_flags &=
~DIAG308_VM_FLAGS_NSS_VALID;
reipl_block_nss->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_NSS;
}
return len;
@ -866,15 +897,21 @@ static void __reipl_run(void *unused)
{
switch (reipl_type) {
case IPL_TYPE_CCW:
uv_set_shared(__pa(reipl_block_ccw));
diag308(DIAG308_SET, reipl_block_ccw);
uv_remove_shared(__pa(reipl_block_ccw));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_FCP:
uv_set_shared(__pa(reipl_block_fcp));
diag308(DIAG308_SET, reipl_block_fcp);
uv_remove_shared(__pa(reipl_block_fcp));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_NSS:
uv_set_shared(__pa(reipl_block_nss));
diag308(DIAG308_SET, reipl_block_nss);
uv_remove_shared(__pa(reipl_block_nss));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_UNKNOWN:
@ -883,7 +920,7 @@ static void __reipl_run(void *unused)
case IPL_TYPE_FCP_DUMP:
break;
}
disabled_wait((unsigned long) __builtin_return_address(0));
disabled_wait();
}
static void reipl_run(struct shutdown_trigger *trigger)
@ -893,10 +930,10 @@ static void reipl_run(struct shutdown_trigger *trigger)
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
ipb->hdr.len = IPL_BP_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
ipb->pb0_hdr.len = IPL_BP0_CCW_LEN;
ipb->pb0_hdr.pbt = IPL_PBT_CCW;
}
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
@ -904,21 +941,20 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
memcpy(ipb->ccw.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
memset(ipb->ccw.loadparm, 0x40, LOADPARM_LEN);
ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM;
/* VM PARM */
if (MACHINE_IS_VM && ipl_block_valid &&
(ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
(ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
ipb->ipl_info.ccw.vm_parm_len =
ipl_block.ipl_info.ccw.vm_parm_len;
memcpy(ipb->ipl_info.ccw.vm_parm,
ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
ipb->ccw.vm_parm_len = ipl_block.ccw.vm_parm_len;
memcpy(ipb->ccw.vm_parm,
ipl_block.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
@ -958,8 +994,8 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
reipl_block_ccw->ipl_info.ccw.ssid = ipl_block.ipl_info.ccw.ssid;
reipl_block_ccw->ipl_info.ccw.devno = ipl_block.ipl_info.ccw.devno;
reipl_block_ccw->ccw.ssid = ipl_block.ccw.ssid;
reipl_block_ccw->ccw.devno = ipl_block.ccw.devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@ -997,14 +1033,14 @@ static int __init reipl_fcp_init(void)
* is invalid in the SCSI IPL parameter block, so take it
* always from sclp_ipl_info.
*/
memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
memcpy(reipl_block_fcp->fcp.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
reipl_block_fcp->fcp.pbt = IPL_PBT_FCP;
reipl_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
@ -1022,10 +1058,10 @@ static int __init reipl_type_init(void)
/*
* If we have an OS info reipl block, this will be used
*/
if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
} else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
}
@ -1070,15 +1106,15 @@ static struct shutdown_action __refdata reipl_action = {
/* FCP dump device attributes */
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
dump_block_fcp->ipl_info.fcp.wwpn);
dump_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
dump_block_fcp->ipl_info.fcp.lun);
dump_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->ipl_info.fcp.bootprog);
dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
dump_block_fcp->ipl_info.fcp.br_lba);
dump_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->ipl_info.fcp.devno);
dump_block_fcp->fcp.devno);
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
@ -1095,7 +1131,7 @@ static struct attribute_group dump_fcp_attr_group = {
};
/* CCW dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
@ -1145,7 +1181,9 @@ static struct kset *dump_kset;
static void diag308_dump(void *dump_block)
{
uv_set_shared(__pa(dump_block));
diag308(DIAG308_SET, dump_block);
uv_remove_shared(__pa(dump_block));
while (1) {
if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
break;
@ -1187,10 +1225,10 @@ static int __init dump_ccw_init(void)
free_page((unsigned long)dump_block_ccw);
return rc;
}
dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
dump_block_ccw->hdr.len = IPL_BP_CCW_LEN;
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
dump_block_ccw->ccw.len = IPL_BP0_CCW_LEN;
dump_block_ccw->ccw.pbt = IPL_PBT_CCW;
dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
@ -1209,11 +1247,11 @@ static int __init dump_fcp_init(void)
free_page((unsigned long)dump_block_fcp);
return rc;
}
dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
dump_block_fcp->hdr.len = IPL_BP_FCP_LEN;
dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
dump_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
dump_block_fcp->fcp.pbt = IPL_PBT_FCP;
dump_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
@ -1337,7 +1375,7 @@ static void stop_run(struct shutdown_trigger *trigger)
{
if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0)
disabled_wait((unsigned long) __builtin_return_address(0));
disabled_wait();
smp_stop_cpu();
}
@ -1572,7 +1610,7 @@ static int __init s390_ipl_init(void)
* READ SCP info provides the correct value.
*/
if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid)
memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, LOADPARM_LEN);
memcpy(sclp_ipl_info.loadparm, ipl_block.ccw.loadparm, LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
@ -1657,15 +1695,15 @@ void __init setup_ipl(void)
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
ipl_info.data.ccw.dev_id.ssid = ipl_block.ipl_info.ccw.ssid;
ipl_info.data.ccw.dev_id.devno = ipl_block.ipl_info.ccw.devno;
ipl_info.data.ccw.dev_id.ssid = ipl_block.ccw.ssid;
ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.dev_id.devno = ipl_block.ipl_info.fcp.devno;
ipl_info.data.fcp.wwpn = ipl_block.ipl_info.fcp.wwpn;
ipl_info.data.fcp.lun = ipl_block.ipl_info.fcp.lun;
ipl_info.data.fcp.dev_id.devno = ipl_block.fcp.devno;
ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
@ -1675,14 +1713,6 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
void __init ipl_store_parameters(void)
{
if (early_ipl_block_valid) {
memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
ipl_block_valid = 1;
}
}
void s390_reset_system(void)
{
/* Disable prefixing */
@ -1690,5 +1720,139 @@ void s390_reset_system(void)
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
diag308_reset();
diag_dma_ops.diag308_reset();
}
#ifdef CONFIG_KEXEC_FILE
int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
unsigned char flags, unsigned short cert)
{
struct ipl_report_component *comp;
comp = vzalloc(sizeof(*comp));
if (!comp)
return -ENOMEM;
list_add_tail(&comp->list, &report->components);
comp->entry.addr = kbuf->mem;
comp->entry.len = kbuf->memsz;
comp->entry.flags = flags;
comp->entry.certificate_index = cert;
report->size += sizeof(comp->entry);
return 0;
}
int ipl_report_add_certificate(struct ipl_report *report, void *key,
unsigned long addr, unsigned long len)
{
struct ipl_report_certificate *cert;
cert = vzalloc(sizeof(*cert));
if (!cert)
return -ENOMEM;
list_add_tail(&cert->list, &report->certificates);
cert->entry.addr = addr;
cert->entry.len = len;
cert->key = key;
report->size += sizeof(cert->entry);
report->size += cert->entry.len;
return 0;
}
struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib)
{
struct ipl_report *report;
report = vzalloc(sizeof(*report));
if (!report)
return ERR_PTR(-ENOMEM);
report->ipib = ipib;
INIT_LIST_HEAD(&report->components);
INIT_LIST_HEAD(&report->certificates);
report->size = ALIGN(ipib->hdr.len, 8);
report->size += sizeof(struct ipl_rl_hdr);
report->size += sizeof(struct ipl_rb_components);
report->size += sizeof(struct ipl_rb_certificates);
return report;
}
void *ipl_report_finish(struct ipl_report *report)
{
struct ipl_report_certificate *cert;
struct ipl_report_component *comp;
struct ipl_rb_certificates *certs;
struct ipl_parameter_block *ipib;
struct ipl_rb_components *comps;
struct ipl_rl_hdr *rl_hdr;
void *buf, *ptr;
buf = vzalloc(report->size);
if (!buf)
return ERR_PTR(-ENOMEM);
ptr = buf;
memcpy(ptr, report->ipib, report->ipib->hdr.len);
ipib = ptr;
if (ipl_secure_flag)
ipib->hdr.flags |= IPL_PL_FLAG_SIPL;
ipib->hdr.flags |= IPL_PL_FLAG_IPLSR;
ptr += report->ipib->hdr.len;
ptr = PTR_ALIGN(ptr, 8);
rl_hdr = ptr;
ptr += sizeof(*rl_hdr);
comps = ptr;
comps->rbt = IPL_RBT_COMPONENTS;
ptr += sizeof(*comps);
list_for_each_entry(comp, &report->components, list) {
memcpy(ptr, &comp->entry, sizeof(comp->entry));
ptr += sizeof(comp->entry);
}
comps->len = ptr - (void *)comps;
certs = ptr;
certs->rbt = IPL_RBT_CERTIFICATES;
ptr += sizeof(*certs);
list_for_each_entry(cert, &report->certificates, list) {
memcpy(ptr, &cert->entry, sizeof(cert->entry));
ptr += sizeof(cert->entry);
}
certs->len = ptr - (void *)certs;
rl_hdr->len = ptr - (void *)rl_hdr;
list_for_each_entry(cert, &report->certificates, list) {
memcpy(ptr, cert->key, cert->entry.len);
ptr += cert->entry.len;
}
BUG_ON(ptr > buf + report->size);
return buf;
}
int ipl_report_free(struct ipl_report *report)
{
struct ipl_report_component *comp, *ncomp;
struct ipl_report_certificate *cert, *ncert;
list_for_each_entry_safe(comp, ncomp, &report->components, list)
vfree(comp);
list_for_each_entry_safe(cert, ncert, &report->certificates, list)
vfree(cert);
vfree(report);
return 0;
}
#endif
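Taken together, these helpers build and serialize the IPL report in a
fixed order. A hedged usage sketch (error handling trimmed; the
kexec_buf argument stands in for a buffer the caller already set up):

	/*
	 * Layout emitted by ipl_report_finish():
	 *   IPL parameter block (copied, IPLSR flag set, SIPL when secure)
	 *   struct ipl_rl_hdr          - length covers the two blocks below
	 *   struct ipl_rb_components   - one entry per added component
	 *   struct ipl_rb_certificates - one entry per added certificate
	 *   raw certificate data       - appended last, outside rl_hdr->len
	 */
	static void *build_report(struct kexec_buf *kbuf)
	{
		struct ipl_report *report;
		void *buf;

		report = ipl_report_init(&ipl_block);
		if (IS_ERR(report))
			return report;
		ipl_report_add_component(report, kbuf,
					 IPL_RB_COMPONENT_FLAG_SIGNED,
					 IPL_RB_CERT_UNKNOWN);
		buf = ipl_report_finish(report); /* vzalloc'ed, report->size bytes */
		ipl_report_free(report);
		return buf;
	}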

@ -11,11 +11,11 @@ size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
char has_lowercase = 0;
len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
if ((ipb->ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP) &&
(ipb->ccw.vm_parm_len > 0)) {
len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
len = min_t(size_t, size - 1, ipb->ccw.vm_parm_len);
memcpy(dest, ipb->ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/

@ -26,6 +26,7 @@
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@ -73,7 +74,6 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
@ -81,14 +81,16 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
{.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
{.irq = IRQIO_GAL, .name = "GAL", .desc = "[I/O] GIB Alert"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[AIO] AP Bus"},
{.irq = IRQIO_PCF, .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
{.irq = IRQIO_PCD, .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
{.irq = IRQIO_MSI, .name = "MSI", .desc = "[AIO] MSI Interrupt"},
{.irq = IRQIO_VAI, .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
{.irq = IRQIO_GAL, .name = "GAL", .desc = "[AIO] GIB Alert"},
{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
@ -116,6 +118,34 @@ void do_IRQ(struct pt_regs *regs, int irq)
set_irq_regs(old_regs);
}
static void show_msi_interrupt(struct seq_file *p, int irq)
{
struct irq_desc *desc;
unsigned long flags;
int cpu;
irq_lock_sparse();
desc = irq_to_desc(irq);
if (!desc)
goto out;
raw_spin_lock_irqsave(&desc->lock, flags);
seq_printf(p, "%3d: ", irq);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
if (desc->irq_data.chip)
seq_printf(p, " %8s", desc->irq_data.chip->name);
if (desc->action)
seq_printf(p, " %s", desc->action->name);
seq_putc(p, '\n');
raw_spin_unlock_irqrestore(&desc->lock, flags);
out:
irq_unlock_sparse();
}
/*
* show_interrupts is needed by /proc/interrupts.
*/
@ -128,7 +158,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (index == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_printf(p, "CPU%-8d", cpu);
seq_putc(p, '\n');
}
if (index < NR_IRQS_BASE) {
@ -139,9 +169,10 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
goto out;
}
if (index > NR_IRQS_BASE)
if (index < nr_irqs) {
show_msi_interrupt(p, index);
goto out;
}
for (index = 0; index < NR_ARCH_IRQS; index++) {
seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
irq = irqclass_sub_desc[index].irq;

@ -10,19 +10,26 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/ipl.h>
#include <asm/setup.h>
static int kexec_file_add_elf_kernel(struct kimage *image,
struct s390_load_data *data,
char *kernel, unsigned long kernel_len)
static int kexec_file_add_kernel_elf(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
Elf_Addr entry;
void *kernel;
int i, ret;
kernel = image->kernel_buf;
ehdr = (Elf_Ehdr *)kernel;
buf.image = image;
if (image->type == KEXEC_TYPE_CRASH)
entry = STARTUP_KDUMP_OFFSET;
else
entry = ehdr->e_entry;
phdr = (void *)ehdr + ehdr->e_phoff;
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
@ -33,30 +40,27 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
buf.bufsz = phdr->p_filesz;
buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
buf.memsz = phdr->p_memsz;
if (phdr->p_paddr == 0) {
data->kernel_buf = buf.buffer;
data->memsz += STARTUP_NORMAL_OFFSET;
buf.buffer += STARTUP_NORMAL_OFFSET;
buf.bufsz -= STARTUP_NORMAL_OFFSET;
buf.mem += STARTUP_NORMAL_OFFSET;
buf.memsz -= STARTUP_NORMAL_OFFSET;
}
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = phdr->p_memsz;
data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;
if (entry - phdr->p_paddr < phdr->p_memsz) {
data->kernel_buf = buf.buffer;
data->kernel_mem = buf.mem;
data->parm = buf.buffer + PARMAREA;
}
ipl_report_add_component(data->report, &buf,
IPL_RB_COMPONENT_FLAG_SIGNED |
IPL_RB_COMPONENT_FLAG_VERIFIED,
IPL_RB_CERT_UNKNOWN);
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
data->memsz += buf.memsz;
}
return 0;
return data->memsz ? 0 : -EINVAL;
}
static void *s390_elf_load(struct kimage *image,
@ -64,11 +68,10 @@ static void *s390_elf_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
struct s390_load_data data = {0};
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
size_t size;
int i, ret;
int i;
/* image->fobs->probe already checked for valid ELF magic number. */
ehdr = (Elf_Ehdr *)kernel;
@ -101,24 +104,7 @@ static void *s390_elf_load(struct kimage *image,
if (size > kernel_len)
return ERR_PTR(-EINVAL);
ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len);
if (ret)
return ERR_PTR(ret);
if (!data.memsz)
return ERR_PTR(-EINVAL);
if (initrd) {
ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
if (ret)
return ERR_PTR(ret);
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
return ERR_PTR(ret);
return kexec_file_update_kernel(image, &data);
return kexec_file_add_components(image, kexec_file_add_kernel_elf);
}
static int s390_elf_probe(const char *buf, unsigned long len)
@ -144,4 +130,7 @@ static int s390_elf_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_elf_ops = {
.probe = s390_elf_probe,
.load = s390_elf_load,
#ifdef CONFIG_KEXEC_VERIFY_SIG
.verify_sig = s390_verify_sig,
#endif /* CONFIG_KEXEC_VERIFY_SIG */
};

@ -10,31 +10,34 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/ipl.h>
#include <asm/setup.h>
static int kexec_file_add_image_kernel(struct kimage *image,
struct s390_load_data *data,
char *kernel, unsigned long kernel_len)
static int kexec_file_add_kernel_image(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
buf.buffer = kernel + STARTUP_NORMAL_OFFSET;
buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET;
buf.buffer = image->kernel_buf;
buf.bufsz = image->kernel_buf_len;
buf.mem = STARTUP_NORMAL_OFFSET;
buf.mem = 0;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
ret = kexec_add_buffer(&buf);
data->kernel_buf = image->kernel_buf;
data->kernel_mem = buf.mem;
data->parm = image->kernel_buf + PARMAREA;
data->memsz += buf.memsz;
data->kernel_buf = kernel;
data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET;
return ret;
ipl_report_add_component(data->report, &buf,
IPL_RB_COMPONENT_FLAG_SIGNED |
IPL_RB_COMPONENT_FLAG_VERIFIED,
IPL_RB_CERT_UNKNOWN);
return kexec_add_buffer(&buf);
}
static void *s390_image_load(struct kimage *image,
@ -42,24 +45,7 @@ static void *s390_image_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
struct s390_load_data data = {0};
int ret;
ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len);
if (ret)
return ERR_PTR(ret);
if (initrd) {
ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
if (ret)
return ERR_PTR(ret);
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
return ERR_PTR(ret);
return kexec_file_update_kernel(image, &data);
return kexec_file_add_components(image, kexec_file_add_kernel_image);
}
static int s390_image_probe(const char *buf, unsigned long len)
@ -73,4 +59,7 @@ static int s390_image_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_image_ops = {
.probe = s390_image_probe,
.load = s390_image_load,
#ifdef CONFIG_KEXEC_VERIFY_SIG
.verify_sig = s390_verify_sig,
#endif /* CONFIG_KEXEC_VERIFY_SIG */
};

@ -27,29 +27,30 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
DEFINE_INSN_CACHE_OPS(dmainsn);
DEFINE_INSN_CACHE_OPS(s390_insn);
static void *alloc_dmainsn_page(void)
static int insn_page_in_use;
static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static void *alloc_s390_insn_page(void)
{
void *page;
page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (page)
set_memory_x((unsigned long) page, 1);
return page;
if (xchg(&insn_page_in_use, 1) == 1)
return NULL;
set_memory_x((unsigned long) &insn_page, 1);
return &insn_page;
}
static void free_dmainsn_page(void *page)
static void free_s390_insn_page(void *page)
{
set_memory_nx((unsigned long) page, 1);
free_page((unsigned long)page);
xchg(&insn_page_in_use, 0);
}
struct kprobe_insn_cache kprobe_dmainsn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
.alloc = alloc_dmainsn_page,
.free = free_dmainsn_page,
.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
struct kprobe_insn_cache kprobe_s390_insn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
.alloc = alloc_s390_insn_page,
.free = free_s390_insn_page,
.pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
};
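The xchg() pair turns the static page into a single-slot allocator: the
first caller claims it, later callers get NULL until
free_s390_insn_page() releases it. The same claim/release idiom in
isolation (a sketch mirroring the code above):

	static int slot_in_use;

	static void *claim_slot(void *slot)
	{
		/* atomic test-and-set; old value 1 means already taken */
		if (xchg(&slot_in_use, 1) == 1)
			return NULL;
		return slot;
	}

	static void release_slot(void)
	{
		xchg(&slot_in_use, 0);	/* full barrier; slot reusable again */
	}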
@ -102,7 +103,7 @@ static int s390_get_insn_slot(struct kprobe *p)
*/
p->ainsn.insn = NULL;
if (is_kernel_addr(p->addr))
p->ainsn.insn = get_dmainsn_slot();
p->ainsn.insn = get_s390_insn_slot();
else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
@ -114,7 +115,7 @@ static void s390_free_insn_slot(struct kprobe *p)
if (!p->ainsn.insn)
return;
if (is_kernel_addr(p->addr))
free_dmainsn_slot(p->ainsn.insn, 0);
free_s390_insn_slot(p->ainsn.insn, 0);
else
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
@ -572,7 +573,7 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = search_exception_tables(regs->psw.addr);
entry = s390_search_extables(regs->psw.addr);
if (entry) {
regs->psw.addr = extable_fixup(entry);
return 1;

@ -27,6 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
@ -95,7 +96,7 @@ static void __do_machine_kdump(void *image)
start_kdump(1);
/* Die if start_kdump returns */
disabled_wait((unsigned long) __builtin_return_address(0));
disabled_wait();
}
/*
@ -253,6 +254,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
vmcoreinfo_append_str("EDMA=%lx\n", __edma);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
void machine_shutdown(void)
@ -280,7 +284,7 @@ static void __do_machine_kexec(void *data)
(*data_mover)(&image->head, image->start);
/* Die if kexec returns */
disabled_wait((unsigned long) __builtin_return_address(0));
disabled_wait();
}
/*

@ -8,7 +8,12 @@
*/
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/verification.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>
const struct kexec_file_ops * const kexec_file_loaders[] = {
@ -17,38 +22,78 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
NULL,
};
int *kexec_file_update_kernel(struct kimage *image,
struct s390_load_data *data)
#ifdef CONFIG_KEXEC_VERIFY_SIG
/*
* Module signature information block.
*
* The constituents of the signature section are, in order:
*
* - Signer's name
* - Key identifier
* - Signature data
* - Information block
*/
struct module_signature {
u8 algo; /* Public-key crypto algorithm [0] */
u8 hash; /* Digest algorithm [0] */
u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
u8 signer_len; /* Length of signer's name [0] */
u8 key_id_len; /* Length of key identifier [0] */
u8 __pad[3];
__be32 sig_len; /* Length of signature data */
};
#define PKEY_ID_PKCS7 2
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
unsigned long *loc;
const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
struct module_signature *ms;
unsigned long sig_len;
if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE)
return ERR_PTR(-EINVAL);
/* Skip signature verification when not secure IPLed. */
if (!ipl_secure_flag)
return 0;
if (image->cmdline_buf_len)
memcpy(data->kernel_buf + COMMAND_LINE_OFFSET,
image->cmdline_buf, image->cmdline_buf_len);
if (marker_len > kernel_len)
return -EKEYREJECTED;
if (image->type == KEXEC_TYPE_CRASH) {
loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET);
*loc = crashk_res.start;
if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
marker_len))
return -EKEYREJECTED;
kernel_len -= marker_len;
loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET);
*loc = crashk_res.end - crashk_res.start + 1;
ms = (void *)kernel + kernel_len - sizeof(*ms);
kernel_len -= sizeof(*ms);
sig_len = be32_to_cpu(ms->sig_len);
if (sig_len >= kernel_len)
return -EKEYREJECTED;
kernel_len -= sig_len;
if (ms->id_type != PKEY_ID_PKCS7)
return -EKEYREJECTED;
if (ms->algo != 0 ||
ms->hash != 0 ||
ms->signer_len != 0 ||
ms->key_id_len != 0 ||
ms->__pad[0] != 0 ||
ms->__pad[1] != 0 ||
ms->__pad[2] != 0) {
return -EBADMSG;
}
if (image->initrd_buf) {
loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET);
*loc = data->initrd_load_addr;
loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET);
*loc = image->initrd_buf_len;
}
return NULL;
return verify_pkcs7_signature(kernel, kernel_len,
kernel + kernel_len, sig_len,
VERIFY_USE_PLATFORM_KEYRING,
VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
}
#endif /* CONFIG_KEXEC_VERIFY_SIG */
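s390_verify_sig() peels the image apart from the end; the layout it
assumes is the standard module-style appended signature (sizes in
bytes):

	/*
	 *   [ kernel image | PKCS#7 blob | struct module_signature | marker ]
	 *                     sig_len      sizeof(*ms)               marker_len
	 *
	 * marker is MODULE_SIG_STRING ("~Module signature appended~\n");
	 * after stripping marker, module_signature and sig_len, what is
	 * left in kernel_len is exactly the data that
	 * verify_pkcs7_signature() checks against the platform keyring.
	 */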
static int kexec_file_update_purgatory(struct kimage *image)
static int kexec_file_update_purgatory(struct kimage *image,
struct s390_load_data *data)
{
u64 entry, type;
int ret;
@ -90,7 +135,8 @@ static int kexec_file_update_purgatory(struct kimage *image)
return ret;
}
int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
static int kexec_file_add_purgatory(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
@ -105,21 +151,21 @@ int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
ret = kexec_load_purgatory(image, &buf);
if (ret)
return ret;
data->memsz += buf.memsz;
ret = kexec_file_update_purgatory(image);
return ret;
return kexec_file_update_purgatory(image, data);
}
int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
char *initrd, unsigned long initrd_len)
static int kexec_file_add_initrd(struct kimage *image,
struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
buf.buffer = initrd;
buf.bufsz = initrd_len;
buf.buffer = image->initrd_buf;
buf.bufsz = image->initrd_buf_len;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
@ -127,11 +173,115 @@ int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
data->initrd_load_addr = buf.mem;
data->parm->initrd_start = buf.mem;
data->parm->initrd_size = buf.memsz;
data->memsz += buf.memsz;
ret = kexec_add_buffer(&buf);
return ret;
if (ret)
return ret;
return ipl_report_add_component(data->report, &buf, 0, 0);
}
static int kexec_file_add_ipl_report(struct kimage *image,
struct s390_load_data *data)
{
__u32 *lc_ipl_parmblock_ptr;
unsigned int len, ncerts;
struct kexec_buf buf;
unsigned long addr;
void *ptr, *end;
buf.image = image;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
ptr = (void *)ipl_cert_list_addr;
end = ptr + ipl_cert_list_size;
ncerts = 0;
while (ptr < end) {
ncerts++;
len = *(unsigned int *)ptr;
ptr += sizeof(len);
ptr += len;
}
addr = data->memsz + data->report->size;
addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
ptr = (void *)ipl_cert_list_addr;
while (ptr < end) {
len = *(unsigned int *)ptr;
ptr += sizeof(len);
ipl_report_add_certificate(data->report, ptr, addr, len);
addr += len;
ptr += len;
}
buf.buffer = ipl_report_finish(data->report);
buf.bufsz = data->report->size;
buf.memsz = buf.bufsz;
data->memsz += buf.memsz;
lc_ipl_parmblock_ptr =
data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
*lc_ipl_parmblock_ptr = (__u32)buf.mem;
return kexec_add_buffer(&buf);
}
void *kexec_file_add_components(struct kimage *image,
int (*add_kernel)(struct kimage *image,
struct s390_load_data *data))
{
struct s390_load_data data = {0};
int ret;
data.report = ipl_report_init(&ipl_block);
if (IS_ERR(data.report))
return data.report;
ret = add_kernel(image, &data);
if (ret)
goto out;
if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
ret = -EINVAL;
goto out;
}
memcpy(data.parm->command_line, image->cmdline_buf,
image->cmdline_buf_len);
if (image->type == KEXEC_TYPE_CRASH) {
data.parm->oldmem_base = crashk_res.start;
data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
}
if (image->initrd_buf) {
ret = kexec_file_add_initrd(image, &data);
if (ret)
goto out;
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
goto out;
if (data.kernel_mem == 0) {
unsigned long restart_psw = 0x0008000080000000UL;
restart_psw += image->start;
memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
image->start = 0;
}
ret = kexec_file_add_ipl_report(image, &data);
out:
ipl_report_free(data.report);
return ERR_PTR(ret);
}
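The restart-PSW fixup above handles the case where the kernel image is
loaded to address 0: its first 8 bytes are overwritten with a short PSW
whose address half is the real entry point, and image->start is cleared
so the reset path picks the PSW up from absolute address 0 (see the
matching cghi/je change in relocate_kernel below). A hedged decode of
the constant (my reading, not spelled out in the patch):

	/*
	 * 0x0008000080000000UL as a short (ESA-format) PSW:
	 *   0x0008...     - PSW bit 12 set, marking the short-PSW format
	 *   0x...80000000 - bit 32, the 31-bit addressing-mode bit of the
	 *                   address half; 'restart_psw += image->start'
	 *                   fills in the entry address below it
	 */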
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@ -140,7 +290,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *symtab)
{
Elf_Rela *relas;
int i;
int i, r_type;
relas = (void *)pi->ehdr + relsec->sh_offset;
@ -174,46 +324,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
addr = section->sh_addr + relas[i].r_offset;
switch (ELF64_R_TYPE(relas[i].r_info)) {
case R_390_8: /* Direct 8 bit. */
*(u8 *)loc = val;
break;
case R_390_12: /* Direct 12 bit. */
*(u16 *)loc &= 0xf000;
*(u16 *)loc |= val & 0xfff;
break;
case R_390_16: /* Direct 16 bit. */
*(u16 *)loc = val;
break;
case R_390_20: /* Direct 20 bit. */
*(u32 *)loc &= 0xf00000ff;
*(u32 *)loc |= (val & 0xfff) << 16; /* DL */
*(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
break;
case R_390_32: /* Direct 32 bit. */
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
*(u16 *)loc = (val - addr);
break;
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
*(u16 *)loc = (val - addr) >> 1;
break;
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
*(u32 *)loc = (val - addr) >> 1;
break;
case R_390_PC32: /* PC relative 32 bit. */
*(u32 *)loc = (val - addr);
break;
case R_390_PC64: /* PC relative 64 bit. */
*(u64 *)loc = (val - addr);
break;
default:
break;
}
r_type = ELF64_R_TYPE(relas[i].r_info);
arch_kexec_do_relocs(r_type, loc, val, addr);
}
return 0;
}
@ -225,10 +337,8 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
* load memory in head.S will be accessed, e.g. to register the next
* command line. If the next kernel were smaller the current kernel
* will panic at load.
*
* 0x11000 = sizeof(head.S)
*/
if (buf_len < 0x11000)
if (buf_len < HEAD_END)
return -ENOEXEC;
return kexec_image_probe_default(image, buf, buf_len);

@ -0,0 +1,53 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/elf.h>
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr)
{
switch (r_type) {
case R_390_NONE:
break;
case R_390_8: /* Direct 8 bit. */
*(u8 *)loc = val;
break;
case R_390_12: /* Direct 12 bit. */
*(u16 *)loc &= 0xf000;
*(u16 *)loc |= val & 0xfff;
break;
case R_390_16: /* Direct 16 bit. */
*(u16 *)loc = val;
break;
case R_390_20: /* Direct 20 bit. */
*(u32 *)loc &= 0xf00000ff;
*(u32 *)loc |= (val & 0xfff) << 16; /* DL */
*(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
break;
case R_390_32: /* Direct 32 bit. */
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
*(u16 *)loc = (val - addr);
break;
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
*(u16 *)loc = (val - addr) >> 1;
break;
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
*(u32 *)loc = (val - addr) >> 1;
break;
case R_390_PC32: /* PC relative 32 bit. */
*(u32 *)loc = (val - addr);
break;
case R_390_PC64: /* PC relative 64 bit. */
*(u64 *)loc = (val - addr);
break;
case R_390_RELATIVE:
*(unsigned long *) loc = val;
break;
default:
return 1;
}
return 0;
}
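As a worked example of the R_390_20 case, which splits a 20-bit
displacement into the DL (bits 16-27) and DH (bits 8-15) fields: the
arithmetic below is standalone and illustrative, mirroring the kernel
code but ignoring that the kernel patches a big-endian word in place.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t insn = 0xf00000ff;	/* template with DL/DH zeroed */
		uint32_t val = 0x12345;		/* 20-bit displacement */

		insn |= (val & 0xfff) << 16;	/* DL: low 12 bits */
		insn |= (val & 0xff000) >> 4;	/* DH: high 8 bits */
		printf("%08x\n", insn);		/* prints f34512ff */
		return 0;
	}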

@ -20,6 +20,7 @@
ENTRY(ftrace_stub)
BR_EX %r14
ENDPROC(ftrace_stub)
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@ -28,7 +29,7 @@ ENTRY(ftrace_stub)
ENTRY(_mcount)
BR_EX %r14
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_caller)
@ -61,10 +62,11 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
ENTRY(ftrace_graph_caller)
.globl ftrace_graph_caller
ftrace_graph_caller:
j ftrace_graph_caller_end
lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
lg %r3,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
lg %r4,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
ftrace_graph_caller_end:
@ -73,6 +75,7 @@ ftrace_graph_caller_end:
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
BR_EX %r1
ENDPROC(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@ -86,5 +89,6 @@ ENTRY(return_to_handler)
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
BR_EX %r14
ENDPROC(return_to_handler)
#endif

@ -125,7 +125,7 @@ void nmi_free_per_cpu(struct lowcore *lc)
static notrace void s390_handle_damage(void)
{
smp_emergency_stop();
disabled_wait((unsigned long) __builtin_return_address(0));
disabled_wait();
while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);

@ -38,7 +38,7 @@ static int __init nospec_report(void)
{
if (test_facility(156))
pr_info("Spectre V2 mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
@ -64,10 +64,10 @@ void __init nospec_auto_detect(void)
* The machine supports etokens.
* Disable expolines and disable nobp.
*/
if (IS_ENABLED(CC_USING_EXPOLINE))
if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
} else if (IS_ENABLED(CC_USING_EXPOLINE)) {
} else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.

@ -15,7 +15,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
{
if (test_facility(156))
return sprintf(buf, "Mitigation: etokens\n");
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");

@ -2,8 +2,8 @@
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
* Copyright IBM Corp. 2012, 2017
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Copyright IBM Corp. 2012, 2019
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@ -26,7 +26,7 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
set = CPUMF_CTR_SET_USER;
else if (event < 128)
set = CPUMF_CTR_SET_CRYPTO;
else if (event < 256)
else if (event < 288)
set = CPUMF_CTR_SET_EXT;
else if (event >= 448 && event < 496)
set = CPUMF_CTR_SET_MT_DIAG;
@ -50,12 +50,19 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_CRYPTO:
if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
hwc->config > 79) ||
(cpuhw->info.csvn >= 6 && hwc->config > 83))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_EXT:
if (cpuhw->info.csvn < 1)
err = -EOPNOTSUPP;
if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
(cpuhw->info.csvn == 2 && hwc->config > 175) ||
(cpuhw->info.csvn > 2 && hwc->config > 255))
(cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
&& hwc->config > 255) ||
(cpuhw->info.csvn >= 6 && hwc->config > 287))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MT_DIAG:

@ -306,15 +306,20 @@ static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
ctrset_size = 2;
break;
case CPUMF_CTR_SET_CRYPTO:
ctrset_size = 16;
if (info->csvn >= 1 && info->csvn <= 5)
ctrset_size = 16;
else if (info->csvn == 6)
ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
if (info->csvn == 1)
ctrset_size = 32;
else if (info->csvn == 2)
ctrset_size = 48;
else if (info->csvn >= 3)
else if (info->csvn >= 3 && info->csvn <= 5)
ctrset_size = 128;
else if (info->csvn == 6)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
if (info->csvn > 3)

@ -31,22 +31,26 @@ CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_FUNCTIONS, 0x0040);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_CYCLES, 0x0041);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS, 0x0042);
CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_CYCLES, 0x0043);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_FUNCTIONS, 0x0044);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_CYCLES, 0x0045);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS, 0x0046);
CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_CYCLES, 0x0047);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_FUNCTIONS, 0x0048);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_CYCLES, 0x0049);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS, 0x004a);
CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_CYCLES, 0x004b);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_FUNCTIONS, 0x004c);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_CYCLES, 0x004d);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS, 0x004e);
CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_FUNCTION_COUNT, 0x0050);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_CYCLES_COUNT, 0x0051);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT, 0x0052);
CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT, 0x0053);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@ -262,23 +266,47 @@ static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
NULL,
};
static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
NULL,
};
static struct attribute *cpumcf_svn_6_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
CPUMF_EVENT_PTR(cf_svn_6, ECC_FUNCTION_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_CYCLES_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT),
CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT),
NULL,
};
@ -562,7 +590,18 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
default:
cfvn = none;
}
-csvn = cpumcf_svn_generic_pmu_event_attr;
+/* Determine version specific crypto set */
+switch (ci.csvn) {
+case 1 ... 5:
+csvn = cpumcf_svn_12345_pmu_event_attr;
+break;
+case 6:
+csvn = cpumcf_svn_6_pmu_event_attr;
+break;
+default:
+csvn = none;
+}
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
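For context, the new SVN 6 set surfaces the four ECC counters (raw codes 0x0050-0x0053 above) as sysfs events like the existing crypto counters. A hedged user-space sketch of counting one of them via perf_event_open(2) follows; the PMU name "cpum_cf" and its sysfs type file are assumptions based on the usual dynamic-PMU layout, not part of this diff, and error reporting is minimal:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int type, fd;
	FILE *f;

	/* Dynamic PMU type number; "cpum_cf" is the assumed PMU name. */
	f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x0050;	/* ECC_FUNCTION_COUNT, per the table above */

	/* No glibc wrapper exists for perf_event_open, so use syscall(2). */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;
	/* ... run an ECC workload here ... */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("ECC functions: %llu\n", count);
	close(fd);
	return 0;
}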


@@ -21,6 +21,7 @@
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
#include <asm/unwind.h>
const char *perf_pmu_name(void)
{
@@ -219,20 +220,13 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
-{
-struct perf_callchain_entry_ctx *entry = data;
-perf_callchain_store(entry, address);
-return 0;
-}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
-if (user_mode(regs))
-return;
-dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
+struct unwind_state state;
+unwind_for_each_frame(&state, current, regs, 0)
+perf_callchain_store(entry, state.ip);
}
/* Perf definitions for PMU event attributes in sysfs */
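perf_callchain_kernel() now walks frames with the new unwind API instead of the dump_trace() callback scheme. unwind_for_each_frame is presumably a thin for-loop wrapper over the primitives added later in this diff (__unwind_start(), unwind_done(), unwind_next_frame()); a sketch of such a helper, with the real definition assumed to live in asm/unwind.h:

/* Sketch of the iteration helper; argument order mirrors the call
 * sites in this diff: state, task, optional pt_regs, starting sp. */
#define unwind_for_each_frame(state, task, regs, sp)	\
	for (__unwind_start(state, task, regs, sp);	\
	     !unwind_done(state);			\
	     unwind_next_frame(state))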


@@ -7,7 +7,7 @@
#include <linux/linkage.h>
-#define PGM_CHECK(handler) .long handler
+#define PGM_CHECK(handler) .quad handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*


@@ -37,6 +37,7 @@
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"


@@ -109,7 +109,8 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
"vxe2", "vxp", "sort", "dflt"
};
static const char * const int_hwcap_str[] = {
"sie"


@@ -73,6 +73,7 @@ ENTRY(store_status)
lgr %r9,%r2
lgr %r2,%r3
BR_EX %r9
ENDPROC(store_status)
.section .bss
.align 8


@@ -58,11 +58,15 @@ ENTRY(relocate_kernel)
j .base
.done:
sgr %r0,%r0 # clear register r0
cghi %r3,0
je .diag
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
.diag:
diag %r0,%r0,0x308
ENDPROC(relocate_kernel)
.align 8
load_psw:


@@ -50,6 +50,7 @@
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
@@ -65,11 +66,13 @@
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include "entry.h"
/*
@@ -89,12 +92,25 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__swsusp_reset_dma);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -736,6 +752,15 @@ static void __init reserve_initrd(void)
#endif
}
/*
* Reserve the memory area used to pass the certificate lists
*/
static void __init reserve_certificate_list(void)
{
if (ipl_cert_list_addr)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
static void __init reserve_mem_detect_info(void)
{
unsigned long start, size;
@@ -814,9 +839,10 @@ static void __init reserve_kernel(void)
{
unsigned long start_pfn = PFN_UP(__pa(_end));
-memblock_reserve(0, PARMAREA_END);
+memblock_reserve(0, HEAD_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
memblock_reserve(__sdma, __edma - __sdma);
}
static void __init setup_memory(void)
@@ -914,7 +940,15 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
if (test_facility(148))
elf_hwcap |= HWCAP_S390_VXRS_EXT2;
if (test_facility(152))
elf_hwcap |= HWCAP_S390_VXRS_PDE;
}
if (test_facility(150))
elf_hwcap |= HWCAP_S390_SORT;
if (test_facility(151))
elf_hwcap |= HWCAP_S390_DFLT;
/*
* Guarded storage support HWCAP_S390_GS is bit 12.
@@ -1022,6 +1056,38 @@ static void __init setup_control_program_code(void)
asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}
/*
* Print the component list from the IPL report
*/
static void __init log_component_list(void)
{
struct ipl_rb_component_entry *ptr, *end;
char *str;
if (!early_ipl_comp_list_addr)
return;
if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
pr_info("Linux is running with Secure-IPL enabled\n");
else
pr_info("Linux is running with Secure-IPL disabled\n");
ptr = (void *) early_ipl_comp_list_addr;
end = (void *) ptr + early_ipl_comp_list_size;
pr_info("The IPL report contains the following components:\n");
while (ptr < end) {
if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
str = "signed, verified";
else
str = "signed, verification failed";
} else {
str = "not signed";
}
pr_info("%016llx - %016llx (%s)\n",
ptr->addr, ptr->addr + ptr->len, str);
ptr++;
}
}
/*
* Setup function called from init/main.c just after the banner
* was printed.
@@ -1042,6 +1108,8 @@ void __init setup_arch(char **cmdline_p)
else
pr_info("Linux is running as a guest in 64-bit mode\n");
log_component_list();
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
@@ -1073,6 +1141,7 @@ void __init setup_arch(char **cmdline_p)
reserve_oldmem();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
reserve_mem_detect_info();
memblock_allow_resize();


@@ -53,6 +53,7 @@
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"
@@ -689,7 +690,7 @@ void __init smp_save_dump_cpus(void)
smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
}
memblock_free(page, PAGE_SIZE);
-diag308_reset();
+diag_dma_ops.diag308_reset();
pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */


@@ -11,59 +11,52 @@
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
-static int __save_address(void *data, unsigned long address, int nosched)
-{
-struct stack_trace *trace = data;
-if (nosched && in_sched_functions(address))
-return 0;
-if (trace->skip > 0) {
-trace->skip--;
-return 0;
-}
-if (trace->nr_entries < trace->max_entries) {
-trace->entries[trace->nr_entries++] = address;
-return 0;
-}
-return 1;
-}
-static int save_address(void *data, unsigned long address, int reliable)
-{
-return __save_address(data, address, 0);
-}
-static int save_address_nosched(void *data, unsigned long address, int reliable)
-{
-return __save_address(data, address, 1);
-}
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
void save_stack_trace(struct stack_trace *trace)
{
-unsigned long sp;
+struct unwind_state state;
-sp = current_stack_pointer();
-dump_trace(save_address, trace, NULL, sp);
+unwind_for_each_frame(&state, current, NULL, 0) {
+if (trace->nr_entries >= trace->max_entries)
+break;
+if (trace->skip > 0)
+trace->skip--;
+else
+trace->entries[trace->nr_entries++] = state.ip;
+}
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
-unsigned long sp;
+struct unwind_state state;
-sp = tsk->thread.ksp;
-if (tsk == current)
-sp = current_stack_pointer();
-dump_trace(save_address_nosched, trace, tsk, sp);
+unwind_for_each_frame(&state, tsk, NULL, 0) {
+if (trace->nr_entries >= trace->max_entries)
+break;
+if (in_sched_functions(state.ip))
+continue;
+if (trace->skip > 0)
+trace->skip--;
+else
+trace->entries[trace->nr_entries++] = state.ip;
+}
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
-unsigned long sp;
+struct unwind_state state;
-sp = kernel_stack_pointer(regs);
-dump_trace(save_address, trace, NULL, sp);
+unwind_for_each_frame(&state, current, regs, 0) {
+if (trace->nr_entries >= trace->max_entries)
+break;
+if (trace->skip > 0)
+trace->skip--;
+else
+trace->entries[trace->nr_entries++] = state.ip;
+}
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
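With this conversion, the skip/max_entries bookkeeping that used to live in __save_address() is folded directly into each loop; the struct stack_trace contract for callers is unchanged. A hypothetical in-kernel caller for illustration (the function name is invented; assumes linux/stacktrace.h and linux/kernel.h):

/* Hypothetical caller - unaffected by this conversion. */
static void dump_my_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* skip dump_my_stack() itself */
	};
	unsigned int i;

	save_stack_trace(&trace);
	for (i = 0; i < trace.nr_entries; i++)
		pr_info("%pS\n", (void *)entries[i]);
}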


@@ -108,6 +108,7 @@ ENTRY(swsusp_arch_suspend)
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
ENDPROC(swsusp_arch_suspend)
/*
* Restore saved memory image to correct place and restore register context.
@@ -154,20 +155,13 @@ ENTRY(swsusp_arch_resume)
ptlb /* flush tlb */
/* Reset System */
-larl %r1,restart_entry
-larl %r2,.Lrestart_diag308_psw
-og %r1,0(%r2)
-stg %r1,0(%r0)
larl %r1,.Lnew_pgm_check_psw
epsw %r2,%r3
stm %r2,%r3,0(%r1)
mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
-lghi %r0,0
-diag %r0,%r0,0x308
-restart_entry:
-lhi %r1,1
-sigp %r1,%r0,SIGP_SET_ARCHITECTURE
-sam64
+larl %r1,__swsusp_reset_dma
+lg %r1,0(%r1)
+BASR_EX %r14,%r1
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
@@ -267,6 +261,7 @@ restore_registers:
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
ENDPROC(swsusp_arch_resume)
.section .data..nosave,"aw",@progbits
.align 8
@@ -275,8 +270,6 @@ restore_registers:
.Lpanic_string:
.asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
-.Lrestart_diag308_psw:
-.long 0x00080000,0x80000000
.Lrestart_suspend_psw:
.quad 0x0000000180000000,restart_suspend
.Lnew_pgm_check_psw:


@@ -49,7 +49,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
report_user_fault(regs, si_signo, 0);
} else {
const struct exception_table_entry *fixup;
-fixup = search_exception_tables(regs->psw.addr);
+fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
else {
@@ -263,5 +263,6 @@ NOKPROBE_SYMBOL(kernel_stack_overflow);
void __init trap_init(void)
{
sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
local_mcck_enable();
}
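do_report_trap() now resolves fixups through s390_search_extables() so that faults in the relocated .dma code (whose table is sorted in trap_init() above) are handled too. Its body is not shown in this diff; presumably it checks the .dma table first and falls back to the regular kernel and module tables, along these lines:

/* Presumed implementation (not shown in this diff). search_extable()
 * is the generic helper from lib/extable.c; __start/__stop_dma_ex_table
 * are the __bootdata_preserved pointers set up in setup.c above. */
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}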


@@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/bitops.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
if (unwind_done(state))
return 0;
return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
{
return (sp <= state->sp) ||
(sp + sizeof(struct stack_frame) > state->stack_info.end);
}
static bool update_stack_info(struct unwind_state *state, unsigned long sp)
{
struct stack_info *info = &state->stack_info;
unsigned long *mask = &state->stack_mask;
/* New stack pointer leaves the current stack */
if (get_stack_info(sp, state->task, info, mask) != 0 ||
!on_stack(info, sp, sizeof(struct stack_frame)))
/* 'sp' does not point to a valid stack */
return false;
return true;
}
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
struct stack_frame *sf;
struct pt_regs *regs;
unsigned long sp, ip;
bool reliable;
regs = state->regs;
if (unlikely(regs)) {
sp = READ_ONCE_TASK_STACK(state->task, regs->gprs[15]);
if (unlikely(outside_of_stack(state, sp))) {
if (!update_stack_info(state, sp))
goto out_err;
}
sf = (struct stack_frame *) sp;
ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
reliable = false;
regs = NULL;
} else {
sf = (struct stack_frame *) state->sp;
sp = READ_ONCE_TASK_STACK(state->task, sf->back_chain);
if (likely(sp)) {
/* Non-zero back-chain points to the previous frame */
if (unlikely(outside_of_stack(state, sp))) {
if (!update_stack_info(state, sp))
goto out_err;
}
sf = (struct stack_frame *) sp;
ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
reliable = true;
} else {
/* No back-chain, look for a pt_regs structure */
sp = state->sp + STACK_FRAME_OVERHEAD;
if (!on_stack(info, sp, sizeof(struct pt_regs)))
goto out_stop;
regs = (struct pt_regs *) sp;
if (user_mode(regs))
goto out_stop;
ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
reliable = true;
}
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Decode any ftrace redirection */
if (ip == (unsigned long) return_to_handler)
ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
ip, (void *) sp);
#endif
/* Update unwind state */
state->sp = sp;
state->ip = ip;
state->regs = regs;
state->reliable = reliable;
return true;
out_err:
state->error = true;
out_stop:
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long sp)
{
struct stack_info *info = &state->stack_info;
unsigned long *mask = &state->stack_mask;
struct stack_frame *sf;
unsigned long ip;
bool reliable;
memset(state, 0, sizeof(*state));
state->task = task;
state->regs = regs;
/* Don't even attempt to start from user mode regs: */
if (regs && user_mode(regs)) {
info->type = STACK_TYPE_UNKNOWN;
return;
}
/* Get current stack pointer and initialize stack info */
if (get_stack_info(sp, task, info, mask) != 0 ||
!on_stack(info, sp, sizeof(struct stack_frame))) {
/* Something is wrong with the stack pointer */
info->type = STACK_TYPE_UNKNOWN;
state->error = true;
return;
}
/* Get the instruction pointer from pt_regs or the stack frame */
if (regs) {
ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
reliable = true;
} else {
sf = (struct stack_frame *) sp;
ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
reliable = false;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Decode any ftrace redirection */
if (ip == (unsigned long) return_to_handler)
ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
ip, NULL);
#endif
/* Update unwind state */
state->sp = sp;
state->ip = ip;
state->reliable = reliable;
}
EXPORT_SYMBOL_GPL(__unwind_start);
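Each frame reports state.ip plus a reliable flag: frames found via a non-zero back chain and PSW addresses taken from pt_regs are trusted, while return addresses guessed from the r14 save slot (sf->gprs[8]) are not. A hypothetical consumer of the API, for illustration only (assumes asm/unwind.h and linux/kernel.h):

/* Hypothetical consumer: log every frame of the current task's
 * kernel stack together with its reliability. */
static void log_current_stack(void)
{
	struct unwind_state state;

	unwind_for_each_frame(&state, current, NULL, 0)
		pr_info("%s %pS\n",
			state.reliable ? "  " : "? ",
			(void *)state.ip);
}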


@@ -29,7 +29,7 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
@@ -55,7 +55,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
@@ -76,7 +76,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
unsigned long vdso_pages;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm)
vdso_pages = vdso32_pages;
#endif
@@ -223,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
mm->context.compat_mm = is_compat_task();
if (mm->context.compat_mm)
vdso_pages = vdso32_pages;
@@ -280,7 +280,7 @@ static int __init vdso_init(void)
int i;
vdso_init_data(vdso_data);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;


@@ -19,7 +19,7 @@ KBUILD_AFLAGS_31 += -m31 -s
KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
-$(call cc-ldoption, -Wl$(comma)--hash-style=both)
+-Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)


@@ -19,7 +19,7 @@ KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
-$(call cc-ldoption, -Wl$(comma)--hash-style=both)
+-Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)


@@ -72,6 +72,7 @@ SECTIONS
__end_ro_after_init = .;
RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
BOOT_DATA_PRESERVED
_edata = .; /* End of data section */
@@ -143,6 +144,18 @@
INIT_DATA_SECTION(0x100)
PERCPU_SECTION(0x100)
.dynsym ALIGN(8) : {
__dynsym_start = .;
*(.dynsym)
__dynsym_end = .;
}
.rela.dyn ALIGN(8) : {
__rela_dyn_start = .;
*(.rela*)
__rela_dyn_end = .;
}
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
@@ -161,6 +174,12 @@
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */
QUAD(__boot_data_preserved_end -
__boot_data_preserved_start) /* bootdata_preserved_size */
QUAD(__dynsym_start) /* dynsym_start */
QUAD(__rela_dyn_start) /* rela_dyn_start */
QUAD(__rela_dyn_end) /* rela_dyn_end */
} :NONE
/* Debugging sections. */
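The QUAD() entries above emit a small descriptor into the image that the boot code reads to locate .bss, the boot data blocks, and the dynamic relocation records it must apply for KASLR. A C-side view might look like the sketch below; the struct name is invented, the leading fields are elided here, and the field names are taken from the comments above:

/* Hypothetical mirror of the descriptor emitted above. */
struct vmlinux_info_sketch {
	/* ... earlier fields not shown in this hunk ... */
	unsigned long bss_size;
	unsigned long bootdata_off;
	unsigned long bootdata_size;
	unsigned long bootdata_preserved_off;
	unsigned long bootdata_preserved_size;
	unsigned long dynsym_start;
	unsigned long rela_dyn_start;
	unsigned long rela_dyn_end;
};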

Some files were not shown because too many files changed in this diff.