From 7010adcdd23527f7efef1e7bc0d8c458f6ec0dd2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 23 Apr 2018 00:02:09 +0200
Subject: [PATCH 01/11] x86/jailhouse: Fix incorrect SPDX identifier

GPL2.0 is not a valid SPDX identifier. Replace it with GPL-2.0.

Fixes: 4a362601baa6 ("x86/jailhouse: Add infrastructure for running in non-root cell")
Signed-off-by: Thomas Gleixner
Acked-by: Jan Kiszka
Cc: Kate Stewart
Cc: Jonathan Corbet
Cc: Greg Kroah-Hartman
Cc: Philippe Ombredanne
Link: https://lkml.kernel.org/r/20180422220832.815346488@linutronix.de
---
 arch/x86/include/asm/jailhouse_para.h | 2 +-
 arch/x86/kernel/jailhouse.c           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h
index b885a961a150..a34897aef2c2 100644
--- a/arch/x86/include/asm/jailhouse_para.h
+++ b/arch/x86/include/asm/jailhouse_para.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Jailhouse paravirt detection
  *
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index fa183a131edc..a15fe0e92cf9 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Jailhouse paravirt_ops implementation
  *

From 84749d83758af6576552046b215b9b7f37f9556b Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sat, 21 Apr 2018 10:19:29 +0200
Subject: [PATCH 02/11] x86/microcode/intel: Save microcode patch
 unconditionally

save_mc_for_early() was a no-op on !CONFIG_HOTPLUG_CPU, but the
generic_load_microcode() path saves the microcode patches it has found
into the cache of patches which is used for late loading too,
regardless of whether CPU hotplug is used or not.

Make the saving unconditional so that late loading can find the proper
patch.

Reported-by: Vitezslav Samel
Signed-off-by: Borislav Petkov
Signed-off-by: Thomas Gleixner
Tested-by: Vitezslav Samel
Tested-by: Ashok Raj
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20180418081140.GA2439@pc11.op.pod.cz
Link: https://lkml.kernel.org/r/20180421081930.15741-1-bp@alien8.de
---
 arch/x86/kernel/cpu/microcode/intel.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 32b8e5724f96..1c2cfa0644aa 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -485,7 +485,6 @@ static void show_saved_mc(void)
  */
 static void save_mc_for_early(u8 *mc, unsigned int size)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	/* Synchronization during CPU hotplug. */
 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
 	show_saved_mc();
 
 	mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool load_builtin_intel_microcode(struct cpio_data *cp)
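A note on the pattern above: what the patch makes unconditional is an
ordinary mutex-serialized insertion into a shared patch cache. A rough
user-space analogue, with hypothetical names (a sketch, not the kernel
code):

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the kernel's microcode patch cache. */
    struct ucode_patch {
            struct ucode_patch *next;
            unsigned int size;
            unsigned char data[];
    };

    static struct ucode_patch *ucode_cache;        /* shared cache head */
    static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Save unconditionally, so a later ("late") load can find the patch. */
    static void save_patch(struct ucode_patch *p)
    {
            pthread_mutex_lock(&cache_mutex);      /* serialize concurrent savers */
            p->next = ucode_cache;                 /* prepend to the cache list */
            ucode_cache = p;
            pthread_mutex_unlock(&cache_mutex);
    }

The bug was compiling the whole body away under !CONFIG_HOTPLUG_CPU:
the cache is needed by the late-loading path whether or not CPUs can be
hotplugged.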
From 09e182d17e8891dd73baba961a0f5a82e9274c97 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Sat, 21 Apr 2018 10:19:30 +0200
Subject: [PATCH 03/11] x86/microcode: Do not exit early from __reload_late()

Vitezslav reported a case where the "Timeout during microcode update!"
panic would hit. After a deeper look, it turned out that his .config
had CONFIG_HOTPLUG_CPU disabled, which practically made
save_mc_for_early() a no-op.

When that happened, the discovered microcode patch wasn't saved into
the cache and the late loading path wouldn't find any. This then led to
an early exit from __reload_late() and thus to the other CPUs waiting
until the timeout was reached, leading to the panic.

In hindsight, that function should have been written so that it does
not return before the post-synchronization. Oh well, I know better
now...

Fixes: bb8c13d61a62 ("x86/microcode: Fix CPU synchronization routine")
Reported-by: Vitezslav Samel
Signed-off-by: Borislav Petkov
Signed-off-by: Thomas Gleixner
Tested-by: Vitezslav Samel
Tested-by: Ashok Raj
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20180418081140.GA2439@pc11.op.pod.cz
Link: https://lkml.kernel.org/r/20180421081930.15741-2-bp@alien8.de
---
 arch/x86/kernel/cpu/microcode/core.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 10c4fc2c91f8..77e201301528 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -564,14 +564,12 @@ static int __reload_late(void *info)
 	apply_microcode_local(&err);
 	spin_unlock(&update_lock);
 
+	/* siblings return UCODE_OK because their engine got updated already */
 	if (err > UCODE_NFOUND) {
 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
-		return -1;
-	/* siblings return UCODE_OK because their engine got updated already */
+		ret = -1;
 	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
 		ret = 1;
-	} else {
-		return ret;
 	}
 
 	/*

From 9124130573950dcfc06b6a59306edfda2fc33ec7 Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Mon, 23 Apr 2018 11:29:22 -0700
Subject: [PATCH 04/11] x86/cpufeatures: Enumerate cldemote instruction

cldemote is a new instruction in future x86 processors. It hints to
hardware that a specified cache line should be moved ("demoted") from
the cache(s) closest to the processor core to a level more distant from
the processor core. This instruction is faster than snooping to make
the cache line available for other cores.

The cldemote instruction is indicated by the presence of the CPUID
feature flag CLDEMOTE (CPUID.(EAX=0x7, ECX=0):ECX[bit 25]).

More details on the cldemote instruction can be found in the latest
Intel Architecture Instruction Set Extensions and Future Features
Programming Reference.

Signed-off-by: Fenghua Yu
Signed-off-by: Thomas Gleixner
Cc: "Ravi V Shankar"
Cc: "H. Peter Anvin"
Cc: "Ashok Raj"
Link: https://lkml.kernel.org/r/1524508162-192587-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index d554c11e01ff..578793e97431 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -320,6 +320,7 @@
 #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */
+#define X86_FEATURE_CLDEMOTE		(16*32+25) /* CLDEMOTE instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV	(17*32+ 0) /* MCA overflow recovery support */
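For reference, the new feature bit can be tested from user space with
the GCC/clang cpuid helpers. A minimal detection sketch, querying leaf
7/subleaf 0 and checking ECX bit 25 as described above:

    #include <stdio.h>
    #include <cpuid.h>          /* GCC/clang __get_cpuid_count() */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Structured extended feature flags: CPUID leaf 7, subleaf 0 */
            if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("CLDEMOTE %ssupported\n", (ecx & (1u << 25)) ? "" : "not ");
            return 0;
    }

In-kernel, the same question is answered by
boot_cpu_has(X86_FEATURE_CLDEMOTE) once the flag above is defined.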
From e3072805c61167b85a30ceeef606620704db31f7 Mon Sep 17 00:00:00 2001
From: Dou Liyang
Date: Wed, 25 Apr 2018 10:05:53 +0800
Subject: [PATCH 05/11] x86/vector: Remove the macro VECTOR_OFFSET_START

Now that Linux uses the matrix allocator for vector assignment, the
original assignment code which used VECTOR_OFFSET_START has been
removed. So remove the stale macro as well.

Fixes: 69cde0004a4b ("x86/vector: Use matrix allocator for vector assignment")
Signed-off-by: Dou Liyang
Signed-off-by: Thomas Gleixner
Acked-by: David Rientjes
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180425020553.17210-1-douly.fnst@cn.fujitsu.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/irq_vectors.h | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 404c5fdff859..57003074bd7a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -34,11 +34,6 @@
  * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR		0x20
-/*
- * We start allocating at 0x21 to spread out vectors evenly between
- * priority levels. (0x80 is the syscall vector)
- */
-#define VECTOR_OFFSET_START		1
 
 /*
  * Reserve the lowest usable vector (and hence lowest priority) 0x20 for

From 7d878817db22f64c2b2c241335ec03e4c3fd5476 Mon Sep 17 00:00:00 2001
From: Dou Liyang
Date: Thu, 26 Apr 2018 14:08:32 +0800
Subject: [PATCH 06/11] x86/vector: Remove the unused macro FPU_IRQ

The macro FPU_IRQ has been unused since v3.10, so remove it.

Signed-off-by: Dou Liyang
Signed-off-by: Thomas Gleixner
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180426060832.27312-1-douly.fnst@cn.fujitsu.com
---
 arch/x86/include/asm/irq_vectors.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 57003074bd7a..548d90bbf919 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -114,8 +114,6 @@
 #define FIRST_SYSTEM_VECTOR		NR_VECTORS
 #endif
 
-#define FPU_IRQ				13
-
 /*
  * Size the maximum number of interrupts.
  *

From 14d12bb8582e158006c35cce0f8ae1706094f9a4 Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Thu, 12 Apr 2018 14:39:10 +0200
Subject: [PATCH 07/11] x86/mm: Make vmemmap and vmalloc base address
 constants unsigned long

Commits 9b46a051e4 ("x86/mm: Initialize vmemmap_base at boot-time") and
a7412546d8 ("x86/mm: Adjust vmalloc base and size at boot-time") lost
the type information for the __VMALLOC_BASE_L4, __VMALLOC_BASE_L5,
__VMEMMAP_BASE_L4 and __VMEMMAP_BASE_L5 constants.

Declare them explicitly unsigned long again.

Fixes: 9b46a051e4 ("x86/mm: Initialize vmemmap_base at boot-time")
Fixes: a7412546d8 ("x86/mm: Adjust vmalloc base and size at boot-time")
Signed-off-by: Jiri Kosina
Signed-off-by: Thomas Gleixner
Acked-by: "Kirill A. Shutemov"
Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1804121437350.28129@cbobk.fhfr.pm
---
 arch/x86/include/asm/pgtable_64_types.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index d5c21a382475..adb47552e6bb 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d;
 #define LDT_PGD_ENTRY		(pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 
-#define __VMALLOC_BASE_L4	0xffffc90000000000
-#define __VMALLOC_BASE_L5	0xffa0000000000000
+#define __VMALLOC_BASE_L4	0xffffc90000000000UL
+#define __VMALLOC_BASE_L5	0xffa0000000000000UL
 
 #define VMALLOC_SIZE_TB_L4	32UL
 #define VMALLOC_SIZE_TB_L5	12800UL
 
-#define __VMEMMAP_BASE_L4	0xffffea0000000000
-#define __VMEMMAP_BASE_L5	0xffd4000000000000
+#define __VMEMMAP_BASE_L4	0xffffea0000000000UL
+#define __VMEMMAP_BASE_L5	0xffd4000000000000UL
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 # define VMALLOC_START		vmalloc_base
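The subtlety behind this fix: an unsuffixed hexadecimal literal takes
the first integer type it fits in (int, unsigned int, long, unsigned
long, ...), so nominally similar address constants can end up with
different types and signedness. A small C11 demonstration, assuming an
LP64 target:

    #include <stdio.h>

    #define TYPE_NAME(x) _Generic((x),                         \
            int: "int",           unsigned int: "unsigned int",  \
            long: "long",         unsigned long: "unsigned long",\
            default: "something else")

    int main(void)
    {
            /* Unsuffixed literals take the first type they fit in ... */
            printf("%s\n", TYPE_NAME(0x7fffc90000000000));   /* long */
            printf("%s\n", TYPE_NAME(0xffffc90000000000));   /* unsigned long */
            /* ... while the UL suffix makes the type explicit: */
            printf("%s\n", TYPE_NAME(0xffffc90000000000UL)); /* unsigned long */
            return 0;
    }

Pinning the constants with UL gives every user of __VMALLOC_BASE_* and
__VMEMMAP_BASE_* one well-defined type to work with.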
From da6fa7ef67f07108a1b0cb9fd9e7fcaabd39c051 Mon Sep 17 00:00:00 2001
From: Yazen Ghannam
Date: Tue, 3 Apr 2018 09:02:28 -0500
Subject: [PATCH 08/11] x86/smpboot: Don't use mwait_play_dead() on AMD
 systems

Recent AMD systems support using MWAIT for C1 state. However, MWAIT
will not allow deeper cstates than C1 on current systems.

play_dead() expects to use the deepest state available. The deepest
state available on AMD systems is reached through SystemIO or HALT. If
MWAIT is available, it is preferred over the other methods, so the CPU
never reaches the deepest possible state.

Don't try to use MWAIT to play_dead() on AMD systems. Instead, use
CPUIDLE to enter the deepest state advertised by firmware. If CPUIDLE
is not available then fall back to HALT.

Signed-off-by: Yazen Ghannam
Signed-off-by: Thomas Gleixner
Reviewed-by: Borislav Petkov
Cc: stable@vger.kernel.org
Cc: Yazen Ghannam
Link: https://lkml.kernel.org/r/20180403140228.58540-1-Yazen.Ghannam@amd.com
---
 arch/x86/kernel/smpboot.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 45175b81dd5b..0f1cbb042f49 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1571,6 +1571,8 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	int i;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))

From b837913fc2d9061bf9b8c0dd6bf2d24e2f98b84a Mon Sep 17 00:00:00 2001
From: "jacek.tomaka@poczta.fm"
Date: Tue, 24 Apr 2018 00:14:25 +0800
Subject: [PATCH 09/11] x86/cpu/intel: Add missing TLB cpuid values

Make the kernel print the correct number of TLB entries on Intel Xeon
Phi 7210 (and others).

Before: [    0.320005] Last level dTLB entries: 4KB 0, 2MB 0, 4MB 0, 1GB 0
After:  [    0.320005] Last level dTLB entries: 4KB 256, 2MB 128, 4MB 128, 1GB 16

The entries do exist in the official Intel SDM, but the type column
there is incorrect (it states "Cache" where it should read "TLB");
the entries for the values 0x6B, 0x6C and 0x6D are correctly described
as 'Data TLB'.

Signed-off-by: Jacek Tomaka
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20180423161425.24366-1-jacekt@dugeo.com
---
 arch/x86/kernel/cpu/intel.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b9693b80fc21..60d1897041da 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
 	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
+	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
+	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
 	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
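These descriptor bytes come from CPUID leaf 2, and the kernel resolves
them via the table being extended here. A simplified, self-contained
sketch of that lookup (hypothetical names, loosely modeled on
intel_tlb_table above; the CPUID query itself is omitted):

    #include <stdio.h>

    struct tlb_desc {
            unsigned char id;
            const char *info;
    };

    /* Subset of the descriptor table, including the three new entries. */
    static const struct tlb_desc tlb_table[] = {
            { 0x63, "TLB_DATA 1 GByte pages, 4-way set associative, 4 entries" },
            { 0x6b, "TLB_DATA 4 KByte pages, 8-way associative, 256 entries" },
            { 0x6c, "TLB_DATA 2M/4M pages, 8-way associative, 128 entries" },
            { 0x6d, "TLB_DATA 1 GByte pages, fully associative, 16 entries" },
            { 0x00, NULL }
    };

    /* Print the meaning of one CPUID leaf 2 descriptor byte, if known. */
    static void describe(unsigned char desc)
    {
            const struct tlb_desc *t;

            for (t = tlb_table; t->info; t++)
                    if (t->id == desc)
                            printf("0x%02x: %s\n", desc, t->info);
    }

    int main(void)
    {
            describe(0x6b);     /* one of the Xeon Phi 7210 descriptors */
            return 0;
    }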
From 3db3eb285259ac129f7aec6b814b3e9f6c1b372b Mon Sep 17 00:00:00 2001
From: Petr Tesarik
Date: Wed, 25 Apr 2018 12:08:35 +0200
Subject: [PATCH 10/11] x86/setup: Do not reserve a crash kernel region if
 booted on Xen PV

Xen PV domains cannot shut down and start a crash kernel. Instead, the
crashing kernel makes a SCHEDOP_shutdown hypercall with the reason code
SHUTDOWN_crash, cf. the xen_crash_shutdown() machine op in
arch/x86/xen/enlighten_pv.c.

A crash kernel reservation is merely a waste of RAM in this case. It
may also confuse users of kexec_load(2) and/or kexec_file_load(2). When
flags include KEXEC_ON_CRASH or KEXEC_FILE_ON_CRASH, respectively,
these syscalls return success, which is technically correct, but the
crash kexec image will never be actually used.

Signed-off-by: Petr Tesarik
Signed-off-by: Thomas Gleixner
Reviewed-by: Juergen Gross
Cc: Tom Lendacky
Cc: Dou Liyang
Cc: Mikulas Patocka
Cc: Andy Lutomirski
Cc: "H. Peter Anvin"
Cc: xen-devel@lists.xenproject.org
Cc: Boris Ostrovsky
Cc: Borislav Petkov
Cc: Jean Delvare
Link: https://lkml.kernel.org/r/20180425120835.23cef60c@ezekiel.suse.cz
---
 arch/x86/kernel/setup.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6285697b6e56..5c623dfe39d1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
 #include <linux/dma-contiguous.h>
+#include <xen/xen.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
 		high = true;
 	}
 
+	if (xen_pv_domain()) {
+		pr_info("Ignoring crashkernel for a Xen PV domain\n");
+		return;
+	}
+
 	/* 0 means: find the address automatically */
 	if (crash_base <= 0) {
 		/*
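For context, the Xen PV crash path mentioned in the message replaces
the kexec jump with a shutdown hypercall. A condensed sketch of the
upstream xen_crash_shutdown()/xen_reboot() pair (kernel code, shown for
illustration only; not verbatim):

    /*
     * Condensed from arch/x86/xen/enlighten_pv.c (illustrative sketch):
     * instead of jumping into a crash kernel, a PV guest asks the
     * hypervisor to handle the crash.
     */
    static void xen_crash_shutdown(struct pt_regs *regs)
    {
            struct sched_shutdown r = { .reason = SHUTDOWN_crash };

            /* SCHEDOP_shutdown hands control back to Xen with the reason code. */
            if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                    BUG();
    }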
From 1a512c0882bd311c5b5561840fcfbe4c25b8f319 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 24 Apr 2018 23:19:51 +0200
Subject: [PATCH 11/11] x86/ipc: Fix x32 version of shmid64_ds and msqid64_ds

A bugfix broke the x32 shmid64_ds and msqid64_ds data structure layout
(as seen from user space) a few years ago: Originally, __BITS_PER_LONG
was defined as 64 on x32, so we did not have padding after the 64-bit
__kernel_time_t fields. After __BITS_PER_LONG got changed to 32,
applications would observe extra padding.

In other parts of the uapi headers we seem to have a mix of those
expecting either 32 or 64 on x32 applications, so we can't easily
revert the patch that broke these two structures.

Instead, this patch decouples x32 from the other architectures and
moves the definitions back into arch-specific headers, partially
reverting the even older commit 73a2d096fdf2 ("x86: remove all
now-duplicate header files").

It's not clear whether this ever made any difference, since at least
glibc carries its own (correct) copy of both of these header files, so
possibly no application has ever observed the definitions here.

Based on a suggestion from H.J. Lu, I tried out the tool from
https://github.com/hjl-tools/linux-header to find other such bugs; it
pointed out the same problem in statfs(), which likewise has a separate
(correct) copy in glibc.

Fixes: f4b4aae18288 ("x86/headers/uapi: Fix __BITS_PER_LONG value for x32 builds")
Signed-off-by: Arnd Bergmann
Signed-off-by: Thomas Gleixner
Cc: "H . J . Lu"
Cc: Jeffrey Walton
Cc: stable@vger.kernel.org
Cc: "H. Peter Anvin"
Link: https://lkml.kernel.org/r/20180424212013.3967461-1-arnd@arndb.de
---
 arch/x86/include/uapi/asm/msgbuf.h | 31 ++++++++++++++++++++++
 arch/x86/include/uapi/asm/shmbuf.h | 42 ++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)

diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
+	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
+	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	__kernel_ulong_t __unused4;
+	__kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
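For contrast, the generic header that the new #else branch bypasses
inserts an explicit padding word after each 64-bit time field whenever
__BITS_PER_LONG != 64. Since x32 reports __BITS_PER_LONG == 32 but uses
the x86_64 binary layout, that padding is exactly what broke the ABI.
Roughly (paraphrased from the asm-generic/msgbuf.h of that era, not
verbatim):

    struct msqid64_ds {
            struct ipc64_perm msg_perm;
            __kernel_time_t msg_stime;      /* last msgsnd time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused1;      /* the padding x32 must not see */
    #endif
            __kernel_time_t msg_rtime;      /* last msgrcv time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused2;
    #endif
            __kernel_time_t msg_ctime;      /* last change time */
    #if __BITS_PER_LONG != 64
            unsigned long   __unused3;
    #endif
            /* ... counters and pids as in the x32 version above ... */
    };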
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
+	__kernel_ulong_t	__unused4;
+	__kernel_ulong_t	__unused5;
+};
+
+struct shminfo64 {
+	__kernel_ulong_t	shmmax;
+	__kernel_ulong_t	shmmin;
+	__kernel_ulong_t	shmmni;
+	__kernel_ulong_t	shmseg;
+	__kernel_ulong_t	shmall;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
+	__kernel_ulong_t	__unused3;
+	__kernel_ulong_t	__unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
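One way to sanity-check an ABI fix like this from user space is to pin
the layout down with a static assert in an x32 build. A minimal sketch;
it assumes an x32-capable toolchain and installed kernel uapi headers
(the exact include set needed may vary):

    /* Build for x32: gcc -mx32 -c check_x32_abi.c */
    #include <stddef.h>
    #include <linux/types.h>    /* __kernel_time_t and friends */
    #include <asm/ipcbuf.h>     /* struct ipc64_perm */
    #include <asm/msgbuf.h>     /* struct msqid64_ds (the header patched above) */

    /* The invariant the patch restores: on x32 the 64-bit time fields
     * sit back to back, with no extra 32-bit padding words between them. */
    _Static_assert(offsetof(struct msqid64_ds, msg_rtime) -
                   offsetof(struct msqid64_ds, msg_stime) ==
                   sizeof(__kernel_time_t),
                   "x32 msqid64_ds must use the x86_64 field layout");

If the padding regression were still present, msg_rtime would sit 12
bytes after msg_stime and the build would fail.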