Merge branch 'x86/core' into perfcounters/core

Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/apic.c
	arch/x86/kernel/setup_percpu.c

This commit is contained in: b1864e9a1a
@@ -158,7 +158,7 @@ Offset	Proto	Name		Meaning
 0202/4	2.00+	header		Magic signature "HdrS"
 0206/2	2.00+	version		Boot protocol version supported
 0208/4	2.00+	realmode_swtch	Boot loader hook (see below)
-020C/2	2.00+	start_sys	The load-low segment (0x1000) (obsolete)
+020C/2	2.00+	start_sys_seg	The load-low segment (0x1000) (obsolete)
 020E/2	2.00+	kernel_version	Pointer to kernel version string
 0210/1	2.00+	type_of_loader	Boot loader identifier
 0211/1	2.00+	loadflags	Boot protocol option flags

@@ -170,10 +170,11 @@ Offset	Proto	Name		Meaning
 0224/2	2.01+	heap_end_ptr	Free memory after setup end
 0226/2	N/A	pad1		Unused
 0228/4	2.02+	cmd_line_ptr	32-bit pointer to the kernel command line
-022C/4	2.03+	initrd_addr_max	Highest legal initrd address
+022C/4	2.03+	ramdisk_max	Highest legal initrd address
 0230/4	2.05+	kernel_alignment Physical addr alignment required for kernel
 0234/1	2.05+	relocatable_kernel Whether kernel is relocatable or not
-0235/3	N/A	pad2		Unused
+0235/1	N/A	pad2		Unused
+0236/2	N/A	pad3		Unused
 0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
 023C/4	2.07+	hardware_subarch Hardware subarchitecture
 0240/8	2.07+	hardware_subarch_data Subarchitecture-specific data

@@ -299,14 +300,14 @@ Protocol:	2.00+
 	e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
 	10.17.

-Field name:	readmode_swtch
+Field name:	realmode_swtch
 Type:		modify (optional)
 Offset/size:	0x208/4
 Protocol:	2.00+

   Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)

-Field name:	start_sys
+Field name:	start_sys_seg
 Type:		read
 Offset/size:	0x20c/2
 Protocol:	2.00+

@@ -468,7 +469,7 @@ Protocol:	2.02+
   zero, the kernel will assume that your boot loader does not support
   the 2.02+ protocol.

-Field name:	initrd_addr_max
+Field name:	ramdisk_max
 Type:		read
 Offset/size:	0x22c/4
 Protocol:	2.03+
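A note on the renamed fields above: boot loaders consume them straight out of
the setup header at fixed offsets. A minimal user-space sketch of validating
the header and reading ramdisk_max follows; it assumes "image" points at the
first few KiB of a loaded bzImage (the buffer handling is hypothetical; the
offsets and the 0x37FFFFFF pre-2.03 default come from the boot protocol
documentation above).

	#include <stdint.h>
	#include <string.h>

	/* Return the boot protocol version, or -1 if the "HdrS" magic
	 * (offset 0x202) is missing, i.e. a pre-2.00 or corrupt image. */
	static int boot_protocol_version(const uint8_t *image)
	{
		if (memcmp(image + 0x202, "HdrS", 4) != 0)
			return -1;
		/* version (0x206/2) is little-endian, e.g. 0x0204 = 2.04 */
		return image[0x206] | (image[0x207] << 8);
	}

	/* Highest legal initrd address: field 0x22C/4 for protocol 2.03+,
	 * a documented fixed default for older protocols. */
	static uint32_t highest_initrd_addr(const uint8_t *image, int ver)
	{
		if (ver < 0x0203)
			return 0x37ffffff;
		return (uint32_t)image[0x22c]         |
		       ((uint32_t)image[0x22d] << 8)  |
		       ((uint32_t)image[0x22e] << 16) |
		       ((uint32_t)image[0x22f] << 24);
	}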
Makefile (3 changes)

@@ -532,8 +532,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif

 # Force gcc to behave correct even for buggy distributions
+# Arch Makefiles may override this setting
 ifndef CONFIG_CC_STACKPROTECTOR
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
 endif

 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS	+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);

 #else /* ! SMP */

 #define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))

 #define per_cpu_init()				(__phys_per_cpu_start)

 #endif	/* SMP */

+#define PER_CPU_BASE_SECTION ".data.percpu"
+
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+	/* temporary support for running on hardware simulator */
+	return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif /* _ASM_IA64_UV_UV_H */
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 	return __va(phys_addr);
 }

+char *__init __acpi_unmap_table(unsigned long virt_addr, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                              Boot-time Table Parsing
    -------------------------------------------------------------------------- */
arch/x86/Kconfig (603 changes): diff not shown because of its large size.
@@ -50,7 +50,7 @@ config M386
 config M486
 	bool "486"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a 486 series processor, either Intel or one of the
 	  compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
 	  DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or

@@ -59,7 +59,7 @@ config M486
 config M586
 	bool "586/K5/5x86/6x86/6x86MX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an 586 or 686 series processor such as the AMD K5,
 	  the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
 	  assume the RDTSC (Read Time Stamp Counter) instruction.

@@ -67,21 +67,21 @@ config M586
 config M586TSC
 	bool "Pentium-Classic"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium Classic processor with the RDTSC (Read
 	  Time Stamp Counter) instruction for benchmarking.

 config M586MMX
 	bool "Pentium-MMX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium with the MMX graphics/multimedia
 	  extended instructions.

 config M686
 	bool "Pentium-Pro"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium Pro chips.  This enables the use of
 	  Pentium Pro extended instructions, and disables the init-time guard
 	  against the f00f bug found in earlier Pentiums.

@@ -89,7 +89,7 @@ config M686
 config MPENTIUMII
 	bool "Pentium-II/Celeron(pre-Coppermine)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-II and
 	  pre-Coppermine Celeron core.  This option enables an unaligned
 	  copy optimization, compiles the kernel with optimization flags

@@ -99,7 +99,7 @@ config MPENTIUMII
 config MPENTIUMIII
 	bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-III and
 	  Celeron-Coppermine core.  This option enables use of some
 	  extended prefetch instructions in addition to the Pentium II

@@ -108,14 +108,14 @@ config MPENTIUMIII
 config MPENTIUMM
 	bool "Pentium M"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium M (not Pentium-4 M)
 	  notebook chips.

 config MPENTIUM4
 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium 4 chips.  This includes the
 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
 	  Pentium-4 M (not Pentium M) chips.  This option enables compile

@@ -151,7 +151,7 @@ config MPENTIUM4
 config MK6
 	bool "K6/K6-II/K6-III"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD K6-family processor.  Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.

@@ -159,14 +159,14 @@ config MK6
 config MK7
 	bool "Athlon/Duron/K7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD Athlon K7-family processor.  Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.

 config MK8
 	bool "Opteron/Athlon64/Hammer/K8"
-	help
+	---help---
 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
 	  Enables use of some extended instructions, and passes appropriate
 	  optimization flags to GCC.

@@ -174,7 +174,7 @@ config MK8
 config MCRUSOE
 	bool "Crusoe"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Crusoe processor.  Treats the processor
 	  like a 586 with TSC, and sets some GCC optimization flags (like a
 	  Pentium Pro with no alignment requirements).

@@ -182,13 +182,13 @@ config MCRUSOE
 config MEFFICEON
 	bool "Efficeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Efficeon processor.

 config MWINCHIPC6
 	bool "Winchip-C6"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip C6 chip.  Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements.

@@ -196,7 +196,7 @@ config MWINCHIPC6
 config MWINCHIP3D
 	bool "Winchip-2/Winchip-2A/Winchip-3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip-2, 2A or 3.  Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements.  Also enable out of order memory

@@ -206,19 +206,19 @@ config MWINCHIP3D
 config MGEODEGX1
 	bool "GeodeGX1"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.

 config MGEODE_LX
 	bool "Geode GX/LX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for AMD Geode GX and LX processors.

 config MCYRIXIII
 	bool "CyrixIII/VIA-C3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
 	  treat this chip as a generic 586.  Whilst the CPU is 686 class,
 	  it lacks the cmov extension which gcc assumes is present when

@@ -230,7 +230,7 @@ config MCYRIXIII
 config MVIAC3_2
 	bool "VIA C3-2 (Nehemiah)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C3 "Nehemiah". Selecting this enables usage
 	  of SSE and tells gcc to treat the CPU as a 686.
 	  Note, this kernel will not boot on older (pre model 9) C3s.

@@ -238,14 +238,14 @@ config MVIAC3_2
 config MVIAC7
 	bool "VIA C7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C7.  Selecting this uses the correct cache
 	  shift and tells gcc to treat the CPU as a 686.

 config MPSC
 	bool "Intel P4 / older Netburst based Xeon"
 	depends on X86_64
-	help
+	---help---
 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
 	  Xeon CPUs with Intel 64bit which is compatible with x86-64.
 	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the

@@ -255,7 +255,7 @@ config MPSC

 config MCORE2
 	bool "Core 2/newer Xeon"
-	help
+	---help---
 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 	  53xx) CPUs. You can distinguish newer from older Xeons by the CPU

@@ -265,7 +265,7 @@ config MCORE2
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
-	help
+	---help---
 	  Generic x86-64 CPU.
 	  Run equally well on all x86-64 CPUs.

@@ -274,7 +274,7 @@ endchoice
 config X86_GENERIC
 	bool "Generic x86 support"
 	depends on X86_32
-	help
+	---help---
 	  Instead of just including optimizations for the selected
 	  x86 variant (e.g. PII, Crusoe or Athlon), include some more
 	  generic optimizations as well. This will make the kernel

@@ -319,7 +319,7 @@ config X86_XADD
 config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-	help
+	---help---
 	  Old PentiumPro multiprocessor systems had errata that could cause
 	  memory operations to violate the x86 ordering standard in rare cases.
 	  Enabling this option will attempt to work around some (but not all)

@@ -412,14 +412,14 @@ config X86_DEBUGCTLMSR

 menuconfig PROCESSOR_SELECT
 	bool "Supported processor vendors" if EMBEDDED
-	help
+	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.

 config CPU_SUP_INTEL
 	default y
 	bool "Support Intel processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Intel processors

 	  You need this enabled if you want your kernel to run on an

@@ -433,7 +433,7 @@ config CPU_SUP_CYRIX_32
 	default y
 	bool "Support Cyrix processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Cyrix processors

 	  You need this enabled if you want your kernel to run on a

@@ -446,7 +446,7 @@ config CPU_SUP_CYRIX_32
 config CPU_SUP_AMD
 	default y
 	bool "Support AMD processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for AMD processors

 	  You need this enabled if you want your kernel to run on an

@@ -460,7 +460,7 @@ config CPU_SUP_CENTAUR_32
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors

 	  You need this enabled if you want your kernel to run on a

@@ -474,7 +474,7 @@ config CPU_SUP_CENTAUR_64
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on 64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors

 	  You need this enabled if you want your kernel to run on a

@@ -488,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
 	default y
 	bool "Support Transmeta processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Transmeta processors

 	  You need this enabled if you want your kernel to run on a

@@ -502,7 +502,7 @@ config CPU_SUP_UMC_32
 	default y
 	bool "Support UMC processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for UMC processors

 	  You need this enabled if you want your kernel to run on a

@@ -521,7 +521,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
-	help
+	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.

 	  Debuggers may use it to collect an execution trace of the debugged
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"

 config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
-	help
+	---help---
 	  If this option is disabled, you allow userspace (root) access to all
 	  of memory, including kernel and userspace memory. Accidental
 	  access to this is obviously disastrous, but specific access can

@@ -25,7 +25,7 @@ config STRICT_DEVMEM
 config X86_VERBOSE_BOOTUP
 	bool "Enable verbose x86 bootup info messages"
 	default y
-	help
+	---help---
 	  Enables the informational output from the decompression stage
 	  (e.g. bzImage) of the boot. If you disable this you will still
 	  see errors. Disable this if you want silent bootup.

@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
 	default y
-	help
+	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
 	  port.

@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
 	bool "Early printk via EHCI debug port"
 	default n
 	depends on EARLY_PRINTK && PCI
-	help
+	---help---
 	  Write kernel log output directly into the EHCI debug port.

 	  This is useful for kernel debugging when your machine crashes very

@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  This option will cause messages to be printed if free stack space
 	  drops below a certain limit.

 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.

@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.

@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
 config DEBUG_PER_CPU_MAPS
 	bool "Debug access to per_cpu maps"
 	depends on DEBUG_KERNEL
-	depends on X86_SMP
+	depends on SMP
 	default n
-	help
+	---help---
 	  Say Y to verify that the per_cpu map being accessed has
 	  been setup. Adds a fair amount of code to kernel memory
 	  and decreases performance.

@@ -96,7 +96,7 @@ config X86_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
 	select DEBUG_FS
-	help
+	---help---
 	  Say Y here if you want to show the kernel pagetable layout in a
 	  debugfs file. This information is only useful for kernel developers
 	  who are working in architecture specific areas of the kernel.

@@ -108,7 +108,7 @@ config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Mark the kernel read-only data as write-protected in the pagetables,
 	  in order to catch accidental (and incorrect) writes to such const
 	  data. This is recommended so that we can catch kernel bugs sooner.

@@ -118,7 +118,7 @@ config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA
 	default y
-	help
+	---help---
 	  This option enables a testcase for the DEBUG_RODATA
 	  feature as well as for the change_page_attr() infrastructure.
 	  If in doubt, say "N"

@@ -126,7 +126,7 @@ config DEBUG_RODATA_TEST
 config DEBUG_NX_TEST
 	tristate "Testcase for the NX non-executable stack feature"
 	depends on DEBUG_KERNEL && m
-	help
+	---help---
 	  This option enables a testcase for the CPU NX capability
 	  and the software setup of this feature.
 	  If in doubt, say "N"

@@ -134,7 +134,7 @@ config DEBUG_NX_TEST
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on X86_32
-	help
+	---help---
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
 	  running more threads on a system and also reduces the pressure

@@ -145,7 +145,7 @@ config DOUBLEFAULT
 	default y
 	bool "Enable doublefault exception handler" if EMBEDDED
 	depends on X86_32
-	help
+	---help---
 	  This option allows trapping of rare doublefault exceptions that
 	  would otherwise cause a system to silently reboot. Disabling this
 	  option saves about 4k and might cause you much additional grey

@@ -155,7 +155,7 @@ config IOMMU_DEBUG
 	bool "Enable IOMMU debugging"
 	depends on GART_IOMMU && DEBUG_KERNEL
 	depends on X86_64
-	help
+	---help---
 	  Force the IOMMU to on even when you have less than 4GB of
 	  memory and add debugging code. On overflow always panic. And
 	  allow to enable IOMMU leak tracing. Can be disabled at boot

@@ -171,7 +171,7 @@ config IOMMU_LEAK
 	bool "IOMMU leak tracing"
 	depends on DEBUG_KERNEL
 	depends on IOMMU_DEBUG
-	help
+	---help---
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.

@@ -224,25 +224,25 @@ choice

 config IO_DELAY_0X80
 	bool "port 0x80 based port-IO delay [recommended]"
-	help
+	---help---
 	  This is the traditional Linux IO delay used for in/out_p.
 	  It is the most tested hence safest selection here.

 config IO_DELAY_0XED
 	bool "port 0xed based port-IO delay"
-	help
+	---help---
 	  Use port 0xed as the IO delay. This frees up port 0x80 which is
 	  often used as a hardware-debug port.

 config IO_DELAY_UDELAY
 	bool "udelay based port-IO delay"
-	help
+	---help---
 	  Use udelay(2) as the IO delay method. This provides the delay
 	  while not having any side-effect on the IO port space.

 config IO_DELAY_NONE
 	bool "no port-IO delay"
-	help
+	---help---
 	  No port-IO delay. Will break on old boxes that require port-IO
 	  delay for certain operations. Should work on most new machines.

@@ -276,18 +276,18 @@ config DEBUG_BOOT_PARAMS
 	bool "Debug boot parameters"
 	depends on DEBUG_KERNEL
 	depends on DEBUG_FS
-	help
+	---help---
 	  This option will cause struct boot_params to be exported via debugfs.

 config CPA_DEBUG
 	bool "CPA self-test code"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Do change_page_attr() self-tests every 30 seconds.

 config OPTIMIZE_INLINING
 	bool "Allow gcc to uninline functions marked 'inline'"
-	help
+	---help---
 	  This option determines if the kernel forces gcc to inline the functions
 	  developers have marked 'inline'. Doing so takes away freedom from gcc to
 	  do what it thinks is best, which is desirable for the gcc 3.x series of

@@ -300,4 +300,3 @@ config OPTIMIZE_INLINING
 	  If unsure, say N.

 endmenu
-
@@ -70,14 +70,17 @@ else
 	# this works around some issues with generating unwind tables in older gccs
 	# newer gccs do it by default
 	KBUILD_CFLAGS += -maccumulate-outgoing-args
 endif

-stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
-stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-		"$(CC)" "-fstack-protector -DGCC_HAS_SP" )
-stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
-		"$(CC)" -fstack-protector-all )
-
-KBUILD_CFLAGS += $(stackp-y)
+ifdef CONFIG_CC_STACKPROTECTOR
+	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+		stackp-y := -fstack-protector
+		stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
+		KBUILD_CFLAGS += $(stackp-y)
+	else
+		$(warning stack protector enabled but no compiler support)
+	endif
+endif

 # Stackpointer is addressed different for 32 bit and 64 bit x86

@@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # prevent gcc from generating any FP code by mistake
 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)

-###
-# Sub architecture support
-# fcore-y is linked before mcore-y files.
-
-# Default subarch .c files
-mcore-y  := arch/x86/mach-default/
-
-# Voyager subarch support
-mflags-$(CONFIG_X86_VOYAGER)	:= -Iarch/x86/include/asm/mach-voyager
-mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager/
-
-# generic subarchitecture
-mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
-fcore-$(CONFIG_X86_GENERICARCH)	+= arch/x86/mach-generic/
-mcore-$(CONFIG_X86_GENERICARCH)	:= arch/x86/mach-default/
-
-# default subarch .h files
-mflags-y += -Iarch/x86/include/asm/mach-default
-
-# 64 bit does not support subarch support - clear sub arch variables
-fcore-$(CONFIG_X86_64)  :=
-mcore-$(CONFIG_X86_64)  :=
-
-KBUILD_CFLAGS += $(mflags-y)
-KBUILD_AFLAGS += $(mflags-y)
-

@@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
 core-y += arch/x86/kernel/
 core-y += arch/x86/mm/

-# Remaining sub architecture files
-core-y += $(mcore-y)
-
 core-y += arch/x86/crypto/
 core-y += arch/x86/vdso/
 core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
@@ -2,6 +2,7 @@
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+*   Copyright 2009 Intel Corporation
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.

@@ -15,16 +16,23 @@
 #include "boot.h"

 #define MAX_8042_LOOPS	100000
+#define MAX_8042_FF	32

 static int empty_8042(void)
 {
 	u8 status;
 	int loops = MAX_8042_LOOPS;
+	int ffs   = MAX_8042_FF;

 	while (loops--) {
 		io_delay();

 		status = inb(0x64);
+		if (status == 0xff) {
+			/* FF is a plausible, but very unlikely status */
+			if (!--ffs)
+				return -1; /* Assume no KBC present */
+		}
 		if (status & 1) {
 			/* Read and discard input data */
 			io_delay();

@@ -118,44 +126,43 @@ static void enable_a20_fast(void)

 int enable_a20(void)
 {
-#if defined(CONFIG_X86_ELAN)
-	/* Elan croaks if we try to touch the KBC */
-	enable_a20_fast();
-	while (!a20_test_long())
-		;
-	return 0;
-#elif defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_VOYAGER
 	/* On Voyager, a20_test() is unsafe? */
 	enable_a20_kbc();
 	return 0;
 #else
 	int loops = A20_ENABLE_LOOPS;
-	while (loops--) {
-		/* First, check to see if A20 is already enabled
-		   (legacy free, etc.) */
-		if (a20_test_short())
-			return 0;
+	int kbc_err;

-		/* Next, try the BIOS (INT 0x15, AX=0x2401) */
-		enable_a20_bios();
-		if (a20_test_short())
-			return 0;
+	while (loops--) {
+		/* First, check to see if A20 is already enabled
+		   (legacy free, etc.) */
+		if (a20_test_short())
+			return 0;
+
+		/* Next, try the BIOS (INT 0x15, AX=0x2401) */
+		enable_a20_bios();
+		if (a20_test_short())
+			return 0;
+
+		/* Try enabling A20 through the keyboard controller */
+		kbc_err = empty_8042();

-		/* Try enabling A20 through the keyboard controller */
-		empty_8042();
-		if (a20_test_short())
-			return 0;	/* BIOS worked, but with delayed reaction */
+		if (a20_test_short())
+			return 0;	/* BIOS worked, but with delayed reaction */

-		enable_a20_kbc();
-		if (a20_test_long())
-			return 0;
+		if (!kbc_err) {
+			enable_a20_kbc();
+			if (a20_test_long())
+				return 0;
+		}

-		/* Finally, try enabling the "fast A20 gate" */
-		enable_a20_fast();
-		if (a20_test_long())
-			return 0;
-	}
-
-	return -1;
+		/* Finally, try enabling the "fast A20 gate" */
+		enable_a20_fast();
+		if (a20_test_long())
+			return 0;
+	}
+
+	return -1;
 #endif
 }
(Diffs for two more changed files are not shown because of their large size.)
@@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
-	int err;
+	int err = 0;

 	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
 		return -EFAULT;

-	/* If you change siginfo_t structure, please make sure that
-	   this code is fixed accordingly.
-	   It should never copy any pad contained in the structure
-	   to avoid security leaks, but must copy the generic
-	   3 ints plus the relevant union member.  */
-	err = __put_user(from->si_signo, &to->si_signo);
-	err |= __put_user(from->si_errno, &to->si_errno);
-	err |= __put_user((short)from->si_code, &to->si_code);
+	put_user_try {
+		/* If you change siginfo_t structure, please make sure that
+		   this code is fixed accordingly.
+		   It should never copy any pad contained in the structure
+		   to avoid security leaks, but must copy the generic
+		   3 ints plus the relevant union member.  */
+		put_user_ex(from->si_signo, &to->si_signo);
+		put_user_ex(from->si_errno, &to->si_errno);
+		put_user_ex((short)from->si_code, &to->si_code);

-	if (from->si_code < 0) {
-		err |= __put_user(from->si_pid, &to->si_pid);
-		err |= __put_user(from->si_uid, &to->si_uid);
-		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
-	} else {
-		/*
-		 * First 32bits of unions are always present:
-		 * si_pid === si_band === si_tid === si_addr(LS half)
-		 */
-		err |= __put_user(from->_sifields._pad[0],
-				  &to->_sifields._pad[0]);
-		switch (from->si_code >> 16) {
-		case __SI_FAULT >> 16:
-			break;
-		case __SI_CHLD >> 16:
-			err |= __put_user(from->si_utime, &to->si_utime);
-			err |= __put_user(from->si_stime, &to->si_stime);
-			err |= __put_user(from->si_status, &to->si_status);
-			/* FALL THROUGH */
-		default:
-		case __SI_KILL >> 16:
-			err |= __put_user(from->si_uid, &to->si_uid);
-			break;
-		case __SI_POLL >> 16:
-			err |= __put_user(from->si_fd, &to->si_fd);
-			break;
-		case __SI_TIMER >> 16:
-			err |= __put_user(from->si_overrun, &to->si_overrun);
-			err |= __put_user(ptr_to_compat(from->si_ptr),
-					  &to->si_ptr);
-			break;
-		 /* This is not generated by the kernel as of now.  */
-		case __SI_RT >> 16:
-		case __SI_MESGQ >> 16:
-			err |= __put_user(from->si_uid, &to->si_uid);
-			err |= __put_user(from->si_int, &to->si_int);
-			break;
+		if (from->si_code < 0) {
+			put_user_ex(from->si_pid, &to->si_pid);
+			put_user_ex(from->si_uid, &to->si_uid);
+			put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
+		} else {
+			/*
+			 * First 32bits of unions are always present:
+			 * si_pid === si_band === si_tid === si_addr(LS half)
+			 */
+			put_user_ex(from->_sifields._pad[0],
+				    &to->_sifields._pad[0]);
+			switch (from->si_code >> 16) {
+			case __SI_FAULT >> 16:
+				break;
+			case __SI_CHLD >> 16:
+				put_user_ex(from->si_utime, &to->si_utime);
+				put_user_ex(from->si_stime, &to->si_stime);
+				put_user_ex(from->si_status, &to->si_status);
+				/* FALL THROUGH */
+			default:
+			case __SI_KILL >> 16:
+				put_user_ex(from->si_uid, &to->si_uid);
+				break;
+			case __SI_POLL >> 16:
+				put_user_ex(from->si_fd, &to->si_fd);
+				break;
+			case __SI_TIMER >> 16:
+				put_user_ex(from->si_overrun, &to->si_overrun);
+				put_user_ex(ptr_to_compat(from->si_ptr),
+					    &to->si_ptr);
+				break;
+			 /* This is not generated by the kernel as of now.  */
+			case __SI_RT >> 16:
+			case __SI_MESGQ >> 16:
+				put_user_ex(from->si_uid, &to->si_uid);
+				put_user_ex(from->si_int, &to->si_int);
+				break;
+			}
 		}
-	}
+	} put_user_catch(err);

 	return err;
 }

 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	int err;
+	int err = 0;
 	u32 ptr32;

 	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
 		return -EFAULT;

-	err = __get_user(to->si_signo, &from->si_signo);
-	err |= __get_user(to->si_errno, &from->si_errno);
-	err |= __get_user(to->si_code, &from->si_code);
+	get_user_try {
+		get_user_ex(to->si_signo, &from->si_signo);
+		get_user_ex(to->si_errno, &from->si_errno);
+		get_user_ex(to->si_code, &from->si_code);

-	err |= __get_user(to->si_pid, &from->si_pid);
-	err |= __get_user(to->si_uid, &from->si_uid);
-	err |= __get_user(ptr32, &from->si_ptr);
-	to->si_ptr = compat_ptr(ptr32);
+		get_user_ex(to->si_pid, &from->si_pid);
+		get_user_ex(to->si_uid, &from->si_uid);
+		get_user_ex(ptr32, &from->si_ptr);
+		to->si_ptr = compat_ptr(ptr32);
+	} get_user_catch(err);

 	return err;
 }

@@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 				  struct pt_regs *regs)
 {
 	stack_t uss, uoss;
-	int ret;
+	int ret, err = 0;
 	mm_segment_t seg;

 	if (uss_ptr) {
 		u32 ptr;

 		memset(&uss, 0, sizeof(stack_t));
-		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
-		    __get_user(ptr, &uss_ptr->ss_sp) ||
-		    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
-		    __get_user(uss.ss_size, &uss_ptr->ss_size))
+		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
 			return -EFAULT;
+
+		get_user_try {
+			get_user_ex(ptr, &uss_ptr->ss_sp);
+			get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
+			get_user_ex(uss.ss_size, &uss_ptr->ss_size);
+		} get_user_catch(err);
+
+		if (err)
+			return -EFAULT;
 		uss.ss_sp = compat_ptr(ptr);
 	}

@@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
 	set_fs(seg);
 	if (ret >= 0 && uoss_ptr)  {
-		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
-		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
-		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
-		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
 			return -EFAULT;
+
+		put_user_try {
+			put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
+			put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
+			put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
+		} put_user_catch(err);
+
+		if (err)
+			ret = -EFAULT;
 	}
 	return ret;

@@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
  * Do a signal return; undo the signal stack.
  */
 #define COPY(x)			{		\
-	err |= __get_user(regs->x, &sc->x);	\
+	get_user_ex(regs->x, &sc->x);		\
 }

 #define COPY_SEG_CPL3(seg)	{			\
 	unsigned short tmp;				\
-	err |= __get_user(tmp, &sc->seg);		\
+	get_user_ex(tmp, &sc->seg);			\
 	regs->seg = tmp | 3;				\
 }

 #define RELOAD_SEG(seg)		{		\
 	unsigned int cur, pre;			\
-	err |= __get_user(pre, &sc->seg);	\
+	get_user_ex(pre, &sc->seg);		\
 	savesegment(seg, cur);			\
 	pre |= 3;				\
 	if (pre != cur)				\

@@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	       sc, sc->err, sc->ip, sc->cs, sc->flags);
 #endif

-	/*
-	 * Reload fs and gs if they have changed in the signal
-	 * handler. This does not handle long fs/gs base changes in
-	 * the handler, but does not clobber them at least in the
-	 * normal case.
-	 */
-	err |= __get_user(gs, &sc->gs);
-	gs |= 3;
-	savesegment(gs, oldgs);
-	if (gs != oldgs)
-		load_gs_index(gs);
-
-	RELOAD_SEG(fs);
-	RELOAD_SEG(ds);
-	RELOAD_SEG(es);
+	get_user_try {
+		/*
+		 * Reload fs and gs if they have changed in the signal
+		 * handler. This does not handle long fs/gs base changes in
+		 * the handler, but does not clobber them at least in the
+		 * normal case.
+		 */
+		get_user_ex(gs, &sc->gs);
+		gs |= 3;
+		savesegment(gs, oldgs);
+		if (gs != oldgs)
+			load_gs_index(gs);

-	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-	COPY(dx); COPY(cx); COPY(ip);
-	/* Don't touch extended registers */
+		RELOAD_SEG(fs);
+		RELOAD_SEG(ds);
+		RELOAD_SEG(es);

-	COPY_SEG_CPL3(cs);
-	COPY_SEG_CPL3(ss);
+		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+		COPY(dx); COPY(cx); COPY(ip);
+		/* Don't touch extended registers */

-	err |= __get_user(tmpflags, &sc->flags);
-	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-	/* disable syscall checks */
-	regs->orig_ax = -1;
+		COPY_SEG_CPL3(cs);
+		COPY_SEG_CPL3(ss);

-	err |= __get_user(tmp, &sc->fpstate);
-	buf = compat_ptr(tmp);
-	err |= restore_i387_xstate_ia32(buf);
+		get_user_ex(tmpflags, &sc->flags);
+		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+		/* disable syscall checks */
+		regs->orig_ax = -1;

-	err |= __get_user(*pax, &sc->ax);
+		get_user_ex(tmp, &sc->fpstate);
+		buf = compat_ptr(tmp);
+		err |= restore_i387_xstate_ia32(buf);
+
+		get_user_ex(*pax, &sc->ax);
+	} get_user_catch(err);
+
 	return err;
 }

@@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 {
 	int tmp, err = 0;

-	savesegment(gs, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-	savesegment(fs, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-	savesegment(ds, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
-	savesegment(es, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->es);
+	put_user_try {
+		savesegment(gs, tmp);
+		put_user_ex(tmp, (unsigned int __user *)&sc->gs);
+		savesegment(fs, tmp);
+		put_user_ex(tmp, (unsigned int __user *)&sc->fs);
+		savesegment(ds, tmp);
+		put_user_ex(tmp, (unsigned int __user *)&sc->ds);
+		savesegment(es, tmp);
+		put_user_ex(tmp, (unsigned int __user *)&sc->es);

-	err |= __put_user(regs->di, &sc->di);
-	err |= __put_user(regs->si, &sc->si);
-	err |= __put_user(regs->bp, &sc->bp);
-	err |= __put_user(regs->sp, &sc->sp);
-	err |= __put_user(regs->bx, &sc->bx);
-	err |= __put_user(regs->dx, &sc->dx);
-	err |= __put_user(regs->cx, &sc->cx);
-	err |= __put_user(regs->ax, &sc->ax);
-	err |= __put_user(current->thread.trap_no, &sc->trapno);
-	err |= __put_user(current->thread.error_code, &sc->err);
-	err |= __put_user(regs->ip, &sc->ip);
-	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-	err |= __put_user(regs->flags, &sc->flags);
-	err |= __put_user(regs->sp, &sc->sp_at_signal);
-	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+		put_user_ex(regs->di, &sc->di);
+		put_user_ex(regs->si, &sc->si);
+		put_user_ex(regs->bp, &sc->bp);
+		put_user_ex(regs->sp, &sc->sp);
+		put_user_ex(regs->bx, &sc->bx);
+		put_user_ex(regs->dx, &sc->dx);
+		put_user_ex(regs->cx, &sc->cx);
+		put_user_ex(regs->ax, &sc->ax);
+		put_user_ex(current->thread.trap_no, &sc->trapno);
+		put_user_ex(current->thread.error_code, &sc->err);
+		put_user_ex(regs->ip, &sc->ip);
+		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+		put_user_ex(regs->flags, &sc->flags);
+		put_user_ex(regs->sp, &sc->sp_at_signal);
+		put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);

-	err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
+		put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);

-	/* non-iBCS2 extensions.. */
-	err |= __put_user(mask, &sc->oldmask);
-	err |= __put_user(current->thread.cr2, &sc->cr2);
+		/* non-iBCS2 extensions.. */
+		put_user_ex(mask, &sc->oldmask);
+		put_user_ex(current->thread.cr2, &sc->cr2);
+	} put_user_catch(err);

 	return err;
 }

@@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		else
 			restorer = &frame->retcode;
 	}
-	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);

-	/*
-	 * These are actually not used anymore, but left because some
-	 * gdb versions depend on them as a marker.
-	 */
-	err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
+	put_user_try {
+		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+		/*
+		 * These are actually not used anymore, but left because some
+		 * gdb versions depend on them as a marker.
+		 */
+		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+	} put_user_catch(err);
+
 	if (err)
 		return -EFAULT;

@@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
 		return -EFAULT;

-	err |= __put_user(sig, &frame->sig);
-	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
-	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
-	err |= copy_siginfo_to_user32(&frame->info, info);
-	if (err)
-		return -EFAULT;
+	put_user_try {
+		put_user_ex(sig, &frame->sig);
+		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
+		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
+		err |= copy_siginfo_to_user32(&frame->info, info);

-	/* Create the ucontext.  */
-	if (cpu_has_xsave)
-		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-	else
-		err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
-	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(regs->sp),
-			  &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-				     regs, set->sig[0]);
-	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-	if (err)
-		return -EFAULT;
+		/* Create the ucontext.  */
+		if (cpu_has_xsave)
+			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+		else
+			put_user_ex(0, &frame->uc.uc_flags);
+		put_user_ex(0, &frame->uc.uc_link);
+		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+		put_user_ex(sas_ss_flags(regs->sp),
+			    &frame->uc.uc_stack.ss_flags);
+		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+					     regs, set->sig[0]);
+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

-	if (ka->sa.sa_flags & SA_RESTORER)
-		restorer = ka->sa.sa_restorer;
-	else
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-					 rt_sigreturn);
-	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
+		if (ka->sa.sa_flags & SA_RESTORER)
+			restorer = ka->sa.sa_restorer;
+		else
+			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+						 rt_sigreturn);
+		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

-	/*
-	 * Not actually used anymore, but left because some gdb
-	 * versions need it.
-	 */
-	err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
+		/*
+		 * Not actually used anymore, but left because some gdb
+		 * versions need it.
+		 */
+		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+	} put_user_catch(err);
+
 	if (err)
 		return -EFAULT;
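The conversion above is mechanical: chains of "err |= __put_user(...)" become
put_user_ex() calls inside a put_user_try { } put_user_catch(err) region, so a
fault is recorded once by the exception-table fixup instead of being
accumulated per call. A minimal sketch of the resulting shape, with a
hypothetical example_area structure standing in for the real signal frames:

	/* example_area and its fields are made up for illustration */
	static int copy_example_to_user(struct example_area __user *to,
					const struct example_area *from)
	{
		int err = 0;

		if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
			return -EFAULT;

		put_user_try {
			/* no per-call error check inside the region */
			put_user_ex(from->a, &to->a);
			put_user_ex(from->b, &to->b);
		} put_user_catch(err);	/* err = -EFAULT if any store faulted */

		return err;
	}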
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs, dump->regs.gs);
+	dump->regs.gs = get_user_gs(regs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
 	acpi_noirq = 1;
 }

-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
@@ -33,7 +33,13 @@
 } while (0)


+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif

 #ifdef CONFIG_X86_LOCAL_APIC

@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;

 extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+	if (apic_verbosity >= APIC_DEBUG)
+		__inquire_remote_apic(apicid);
+}
+
 /*
  * Basic functions accessing APICs.
  */

@@ -124,12 +145,35 @@ struct apic_ops {

 extern struct apic_ops *apic_ops;

-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+static inline u32 apic_read(u32 reg)
+{
+	return apic_ops->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+	apic_ops->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+	return apic_ops->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+	apic_ops->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+	apic_ops->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+	return apic_ops->safe_wait_icr_idle();
+}

 extern int get_physical_broadcast(void);

@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }

 #endif /* !CONFIG_X86_LOCAL_APIC */

+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x)	(apic->set_apic_id(x))
+#else
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+	if (APIC_XAPIC(ver))
+		return (x >> 24) & 0xFF;
+	else
+		return (x >> 24) & 0x0F;
+}
+#endif
+
+#endif
+
 #endif /* _ASM_X86_APIC_H */
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-	return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
-	return &cpu_online_map;
-#else
-	return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0)    /* phys delivery to target proc */
-#define NO_BALANCE_IRQ (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-	return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-	return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-	unsigned long val, id;
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	id = xapic_phys_to_log_apicid(cpu);
-	val |= SET_APIC_LOGICAL_ID(id);
-	return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-	unsigned long val;
-	int cpu = smp_processor_id();
-
-	apic_write(APIC_DFR, APIC_DFR_VALUE);
-	val = calculate_ldr(cpu);
-	apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
-	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-		"Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-	return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-	return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-	if (mps_cpu < nr_cpu_ids)
-		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
-	return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-	return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-	return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-	int cpu;
-	int apicid;
-
-	cpu = first_cpu(*cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-						  const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask)
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	if (cpu < nr_cpu_ids)
-		return cpu_to_logical_apicid(cpu);
-
-	return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define		APIC_ID_MASK		(0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-	return (((x)>>24)&0xFF);
-}
-
-#define		GET_APIC_ID(x)	get_apic_id(x)
-
-#endif
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-	send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-	send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-	send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
@@ -1,5 +1,55 @@
 /*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+ For 32-bit we have the following conventions - kernel is built with
+ -mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments         | callee-saved        | extra caller-saved | return
+ [callee-clobbered] |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
  */

+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
+ */
+
 #define R15 0

@@ -9,7 +59,7 @@
 #define RBP		32
 #define RBX		40

-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
 #define R11		48
 #define R10		56
 #define R9		64

@@ -22,7 +72,7 @@
 #define ORIG_RAX	120       /* + error_code */
 /* end of arguments */

-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
 #define RIP		128
 #define CS		136
 #define EFLAGS		144
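The [**] struct-return rule in the comment above is easy to observe from user
space: anything wider than two registers grows a hidden pointer argument.
A small sketch (type names invented for the example; compile with gcc -O2 on
x86-64 and read the generated assembly):

	#include <stdint.h>

	struct two_words   { uint64_t a, b; };		/* 128 bits */
	struct three_words { uint64_t a, b, c; };	/* 192 bits */

	struct two_words ret_small(void)
	{
		/* fits the register pair: returned in rax:rdx */
		return (struct two_words){ 1, 2 };
	}

	struct three_words ret_big(void)
	{
		/* caller allocates the result on its stack and passes its
		 * address in rdi; the other arguments shift up by one */
		return (struct three_words){ 1, 2, 3 };
	}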
@@ -32,10 +32,6 @@ extern void arch_unregister_cpu(int);

 DECLARE_PER_CPU(int, cpu_state);

-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id	0
-#endif
+extern unsigned int boot_cpu_id;

 #endif /* _ASM_X86_CPU_H */
@@ -10,6 +10,8 @@ extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;

@@ -22,6 +24,8 @@ extern cpumask_t cpu_sibling_setup_map;
#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)

static inline void setup_cpu_local_masks(void) { }

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */

@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
 * now struct_user_regs, they are different)
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs)	\
do {						\
	pr_reg[0] = regs->bx;			\
	pr_reg[1] = regs->cx;			\

@@ -124,7 +124,6 @@ do { \
	pr_reg[7] = regs->ds & 0xffff;		\
	pr_reg[8] = regs->es & 0xffff;		\
	pr_reg[9] = regs->fs & 0xffff;		\
	savesegment(gs, pr_reg[10]);		\
	pr_reg[11] = regs->orig_ax;		\
	pr_reg[12] = regs->ip;			\
	pr_reg[13] = regs->cs & 0xffff;		\

@@ -133,6 +132,18 @@ do { \
	pr_reg[16] = regs->ss & 0xffff;		\
} while (0);

#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	pr_reg[10] = get_user_gs(regs);		\
} while (0);

#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs)	\
do {						\
	ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
	savesegment(gs, pr_reg[10]);		\
} while (0);

#define ELF_PLATFORM	(utsname()->machine)
#define set_personality_64bit()	do { } while (0)

@@ -9,7 +9,7 @@
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)

@@ -1,242 +0,0 @@
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)

static inline int apic_id_registered(void)
{
	return (1);
}

static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}

static inline const cpumask_t *target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}

#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
#define INT_DELIVERY_MODE	(dest_Fixed)
#define INT_DEST_MODE		(0)    /* phys delivery to target procs */
#define NO_BALANCE_IRQ		(0)
#undef  APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL	0x0

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

#define apicid_cluster(apicid) (apicid & 0xF0)

static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;
	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LdR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

extern int apic_version [MAX_APICS];
static inline void setup_apic_routing(void)
{
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
			"Physical Cluster" : "Logical Cluster",
		nr_ioapics, cpus_addr(*target_cpus())[0]);
}

static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}

static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}


static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (!mps_cpu)
		return boot_cpu_physical_apicid;
	else if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
		return BAD_APICID;
}

static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;
	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}

extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	return logical_smp_processor_id();
#endif
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}


static inline void setup_portio_remap(void)
{
}

extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}

static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}

static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster.  If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}


static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

#endif /* __ASM_ES7000_APIC_H */

@@ -1,13 +0,0 @@
#ifndef __ASM_ES7000_APICDEF_H
#define __ASM_ES7000_APICDEF_H

#define		APIC_ID_MASK		(0xFF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
	return (((x)>>24)&0xFF);
}

#define		GET_APIC_ID(x)	get_apic_id(x)

#endif

@@ -1,22 +0,0 @@
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H

void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
	send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
	send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static inline void send_IPI_all(int vector)
{
	send_IPI_mask(cpu_online_mask, vector);
}

#endif /* __ASM_ES7000_IPI_H */

@@ -1,29 +0,0 @@
#ifndef __ASM_ES7000_MPPARSE_H
#define __ASM_ES7000_MPPARSE_H

#include <linux/acpi.h>

extern int parse_unisys_oem (char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
extern void setup_unisys(void);

#ifndef CONFIG_X86_GENERICARCH
extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
#endif

#ifdef CONFIG_ACPI

static inline int es7000_check_dsdt(void)
{
	struct acpi_table_header header;

	if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
	    !strncmp(header.oem_id, "UNISYS", 6))
		return 1;
	return 0;
}
#endif

#endif /* __ASM_MACH_MPPARSE_H */

@@ -1,37 +0,0 @@
#ifndef __ASM_ES7000_WAKECPU_H
#define __ASM_ES7000_WAKECPU_H

#define TRAMPOLINE_PHYS_LOW	0x467
#define TRAMPOLINE_PHYS_HIGH	0x469

static inline void wait_for_init_deassert(atomic_t *deassert)
{
#ifndef CONFIG_ES7000_CLUSTERED_APIC
	while (!atomic_read(deassert))
		cpu_relax();
#endif
	return;
}

/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}

static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}

static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}

extern void __inquire_remote_apic(int apicid);

static inline void inquire_remote_apic(int apicid)
{
	if (apic_verbosity >= APIC_DEBUG)
		__inquire_remote_apic(apicid);
}

#endif /* __ASM_MACH_WAKECPU_H */

@@ -95,10 +95,6 @@ enum fixed_addresses {
			(__end_of_permanent_fixed_addresses & 255),
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
	FIX_WP_TEST,
#ifdef CONFIG_ACPI
	FIX_ACPI_BEGIN,
	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif

@@ -50,10 +50,6 @@ enum fixed_addresses {
	FIX_PARAVIRT_BOOTMAP,
#endif
	__end_of_permanent_fixed_addresses,
#ifdef CONFIG_ACPI
	FIX_ACPI_BEGIN,
	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif

@@ -1,5 +1,263 @@
#ifndef _ASM_X86_GENAPIC_H
#define _ASM_X86_GENAPIC_H

#include <linux/cpumask.h>

#include <asm/mpspec.h>
#include <asm/atomic.h>

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
struct genapic {
	char	*name;

	int	(*probe)(void);
	int	(*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
	int	(*apic_id_registered)(void);

	u32	irq_delivery_mode;
	u32	irq_dest_mode;

	const struct cpumask *(*target_cpus)(void);

	int	disable_esr;

	int	dest_logical;
	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
	unsigned long (*check_apicid_present)(int apicid);

	void	(*vector_allocation_domain)(int cpu, struct cpumask *retmask);
	void	(*init_apic_ldr)(void);

	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);

	void	(*setup_apic_routing)(void);
	int	(*multi_timer_check)(int apic, int irq);
	int	(*apicid_to_node)(int logical_apicid);
	int	(*cpu_to_logical_apicid)(int cpu);
	int	(*cpu_present_to_apicid)(int mps_cpu);
	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
	void	(*setup_portio_remap)(void);
	int	(*check_phys_apicid_present)(int boot_cpu_physical_apicid);
	void	(*enable_apic_mode)(void);
	int	(*phys_pkg_id)(int cpuid_apic, int index_msb);

	/*
	 * When one of the next two hooks returns 1 the genapic
	 * is switched to this. Essentially they are additional
	 * probe functions:
	 */
	int	(*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);

	unsigned int	(*get_apic_id)(unsigned long x);
	unsigned long	(*set_apic_id)(unsigned int id);
	unsigned long	apic_id_mask;

	unsigned int	(*cpu_mask_to_apicid)(const struct cpumask *cpumask);
	unsigned int	(*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
						  const struct cpumask *andmask);

	/* ipi */
	void	(*send_IPI_mask)(const struct cpumask *mask, int vector);
	void	(*send_IPI_mask_allbutself)(const struct cpumask *mask,
					    int vector);
	void	(*send_IPI_allbutself)(int vector);
	void	(*send_IPI_all)(int vector);
	void	(*send_IPI_self)(int vector);

	/* wakeup_secondary_cpu */
	int	(*wakeup_cpu)(int apicid, unsigned long start_eip);

	int	trampoline_phys_low;
	int	trampoline_phys_high;

	void	(*wait_for_init_deassert)(atomic_t *deassert);
	void	(*smp_callin_clear_local_apic)(void);
	void	(*store_NMI_vector)(unsigned short *high, unsigned short *low);
	void	(*inquire_remote_apic)(int apicid);
};
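For orientation, a hedged sketch of how a sub-architecture might instantiate this ops table (the field names are from the struct above; the probe function and the instance itself are hypothetical, not code from this tree):

	/* Illustrative sketch only. */
	static int example_probe(void) { return 1; }	/* hypothetical */

	static struct genapic apic_example = {
		.name			= "example",
		.probe			= example_probe,
		.irq_delivery_mode	= dest_LowestPrio,
		.irq_dest_mode		= 1,
		.target_cpus		= default_target_cpus,
	};

The core then dispatches every APIC operation through the global pointer declared just below, e.g. apic = &apic_example;.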

extern struct genapic *apic;

/*
 * Warm reset vector default position:
 */
#define DEFAULT_TRAMPOLINE_PHYS_LOW		0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH		0x469

#ifdef CONFIG_X86_32
# include "genapic_32.h"
extern void es7000_update_genapic_to_cluster(void);
#else
# include "genapic_64.h"
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int default_acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);

extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);

extern void default_setup_apic_routing(void);

extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
	while (!atomic_read(deassert))
		cpu_relax();
	return;
}

extern void generic_bigsmp_probe(void);


#ifdef CONFIG_X86_LOCAL_APIC

#include <asm/smp.h>

#define APIC_DFR_VALUE	(APIC_DFR_FLAT)

static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);


static inline unsigned int read_apic_id(void)
{
	unsigned int reg;

	reg = apic_read(APIC_ID);

	return apic->get_apic_id(reg);
}

#ifdef CONFIG_X86_64
extern void default_setup_apic_routing(void);
#else

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
extern void default_init_apic_ldr(void);

static inline int default_apic_id_registered(void)
{
	return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return cpumask_bits(cpumask)[0];
}

static inline unsigned int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			       const struct cpumask *andmask)
{
	unsigned long mask1 = cpumask_bits(cpumask)[0];
	unsigned long mask2 = cpumask_bits(andmask)[0];
	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];

	return (unsigned int)(mask1 & mask2 & mask3);
}
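A worked example of the three-way AND above (editorial; assumes flat logical mode, where cpu N owns bit N of the destination word):

	/* cpumask {0,2} = 0b0101, andmask {2,3} = 0b1100, online {0-3} = 0b1111 */
	/* 0b0101 & 0b1100 & 0b1111 = 0b0100  ->  logical destination: CPU 2     */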

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

static inline void default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
					"Flat", nr_ioapics);
#endif
}

extern int default_apicid_to_node(int logical_apicid);

#endif

static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return physid_isset(apicid, bitmap);
}

static inline unsigned long default_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
{
	return phys_map;
}

/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
	return 1 << cpu;
}

static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
		return BAD_APICID;
}

static inline int
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}

#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

static inline int
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}

#endif /* CONFIG_X86_LOCAL_APIC */

#endif /* _ASM_X86_GENAPIC_64_H */

@@ -1,141 +0,0 @@
#ifndef _ASM_X86_GENAPIC_32_H
#define _ASM_X86_GENAPIC_32_H

#include <asm/mpspec.h>
#include <asm/atomic.h>

/*
 * Generic APIC driver interface.
 *
 * An straight forward mapping of the APIC related parts of the
 * x86 subarchitecture interface to a dynamic object.
 *
 * This is used by the "generic" x86 subarchitecture.
 *
 * Copyright 2003 Andi Kleen, SuSE Labs.
 */

struct mpc_bus;
struct mpc_table;
struct mpc_cpu;

struct genapic {
	char *name;
	int (*probe)(void);

	int (*apic_id_registered)(void);
	const struct cpumask *(*target_cpus)(void);
	int int_delivery_mode;
	int int_dest_mode;
	int ESR_DISABLE;
	int apic_destination_logical;
	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
	unsigned long (*check_apicid_present)(int apicid);
	int no_balance_irq;
	int no_ioapic_check;
	void (*init_apic_ldr)(void);
	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);

	void (*setup_apic_routing)(void);
	int (*multi_timer_check)(int apic, int irq);
	int (*apicid_to_node)(int logical_apicid);
	int (*cpu_to_logical_apicid)(int cpu);
	int (*cpu_present_to_apicid)(int mps_cpu);
	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
	void (*setup_portio_remap)(void);
	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
	void (*enable_apic_mode)(void);
	u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);

	/* mpparse */
	/* When one of the next two hooks returns 1 the genapic
	   is switched to this. Essentially they are additional probe
	   functions. */
	int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
			     char *productid);
	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);

	unsigned (*get_apic_id)(unsigned long x);
	unsigned long apic_id_mask;
	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
					       const struct cpumask *andmask);
	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);

#ifdef CONFIG_SMP
	/* ipi */
	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
					 int vector);
	void (*send_IPI_allbutself)(int vector);
	void (*send_IPI_all)(int vector);
#endif
	int (*wakeup_cpu)(int apicid, unsigned long start_eip);
	int trampoline_phys_low;
	int trampoline_phys_high;
	void (*wait_for_init_deassert)(atomic_t *deassert);
	void (*smp_callin_clear_local_apic)(void);
	void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
	void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
	void (*inquire_remote_apic)(int apicid);
};

#define APICFUNC(x) .x = x,

/* More functions could be probably marked IPIFUNC and save some space
   in UP GENERICARCH kernels, but I don't have the nerve right now
   to untangle this mess. -AK  */
#ifdef CONFIG_SMP
#define IPIFUNC(x) APICFUNC(x)
#else
#define IPIFUNC(x)
#endif

#define APIC_INIT(aname, aprobe)			\
{							\
	.name = aname,					\
	.probe = aprobe,				\
	.int_delivery_mode = INT_DELIVERY_MODE,		\
	.int_dest_mode = INT_DEST_MODE,			\
	.no_balance_irq = NO_BALANCE_IRQ,		\
	.ESR_DISABLE = esr_disable,			\
	.apic_destination_logical = APIC_DEST_LOGICAL,	\
	APICFUNC(apic_id_registered)			\
	APICFUNC(target_cpus)				\
	APICFUNC(check_apicid_used)			\
	APICFUNC(check_apicid_present)			\
	APICFUNC(init_apic_ldr)				\
	APICFUNC(ioapic_phys_id_map)			\
	APICFUNC(setup_apic_routing)			\
	APICFUNC(multi_timer_check)			\
	APICFUNC(apicid_to_node)			\
	APICFUNC(cpu_to_logical_apicid)			\
	APICFUNC(cpu_present_to_apicid)			\
	APICFUNC(apicid_to_cpu_present)			\
	APICFUNC(setup_portio_remap)			\
	APICFUNC(check_phys_apicid_present)		\
	APICFUNC(mps_oem_check)				\
	APICFUNC(get_apic_id)				\
	.apic_id_mask = APIC_ID_MASK,			\
	APICFUNC(cpu_mask_to_apicid)			\
	APICFUNC(cpu_mask_to_apicid_and)		\
	APICFUNC(vector_allocation_domain)		\
	APICFUNC(acpi_madt_oem_check)			\
	IPIFUNC(send_IPI_mask)				\
	IPIFUNC(send_IPI_allbutself)			\
	IPIFUNC(send_IPI_all)				\
	APICFUNC(enable_apic_mode)			\
	APICFUNC(phys_pkg_id)				\
	.trampoline_phys_low = TRAMPOLINE_PHYS_LOW,	\
	.trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,	\
	APICFUNC(wait_for_init_deassert)		\
	APICFUNC(smp_callin_clear_local_apic)		\
	APICFUNC(store_NMI_vector)			\
	APICFUNC(restore_NMI_vector)			\
	APICFUNC(inquire_remote_apic)			\
}

extern struct genapic *genapic;
extern void es7000_update_genapic_to_cluster(void);

#endif /* _ASM_X86_GENAPIC_32_H */

@@ -1,60 +0,0 @@
#ifndef _ASM_X86_GENAPIC_64_H
#define _ASM_X86_GENAPIC_64_H

#include <linux/cpumask.h>

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */

struct genapic {
	char *name;
	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
	u32 int_delivery_mode;
	u32 int_dest_mode;
	int (*apic_id_registered)(void);
	const struct cpumask *(*target_cpus)(void);
	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
	void (*init_apic_ldr)(void);
	/* ipi */
	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
					 int vector);
	void (*send_IPI_allbutself)(int vector);
	void (*send_IPI_all)(int vector);
	void (*send_IPI_self)(int vector);
	/* */
	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
					       const struct cpumask *andmask);
	unsigned int (*phys_pkg_id)(int index_msb);
	unsigned int (*get_apic_id)(unsigned long x);
	unsigned long (*set_apic_id)(unsigned int id);
	unsigned long apic_id_mask;
	/* wakeup_secondary_cpu */
	int (*wakeup_cpu)(int apicid, unsigned long start_eip);
};

extern struct genapic *genapic;

extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);

extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);

extern void setup_apic_routing(void);

#endif /* _ASM_X86_GENAPIC_64_H */

@@ -25,8 +25,6 @@
#include <asm/irq.h>
#include <asm/sections.h>

#define platform_legacy_irq(irq)	((irq) < 16)

/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);

@@ -60,7 +58,7 @@ extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);

/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;

extern void init_VISWS_APIC_irqs(void);

@@ -69,15 +67,7 @@ extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);

#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
#endif

/* IPI functions */
#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
#endif
extern void send_IPI(int dest, int vector);

/* Statistics */
extern atomic_t irq_err_count;

@@ -86,21 +76,11 @@ extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);

/* Voyager functions */
extern asmlinkage void vic_cpi_interrupt(void);
extern asmlinkage void vic_sys_interrupt(void);
extern asmlinkage void vic_cmn_interrupt(void);
extern asmlinkage void qic_timer_interrupt(void);
extern asmlinkage void qic_invalidate_interrupt(void);
extern asmlinkage void qic_reschedule_interrupt(void);
extern asmlinkage void qic_enable_irq_interrupt(void);
extern asmlinkage void qic_call_function_interrupt(void);

/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);

@@ -5,6 +5,7 @@

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \

@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq			readq
#define writeq			writeq

/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	The returned physical address is the physical (CPU) mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses directly mapped or allocated via kmalloc.
 *
 *	This function does not give bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	The returned virtual address is a current CPU mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses that have a kernel mapping
 *
 *	This function does not handle bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
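A hedged usage sketch of the pair above (editorial; assumes a lowmem kmalloc() buffer, for which the linear mapping makes the conversion a pure arithmetic round trip):

	void *buf = kmalloc(64, GFP_KERNEL);	/* directly-mapped (lowmem) memory */

	if (buf) {
		phys_addr_t pa = virt_to_phys(buf);	/* CPU-physical address   */
		BUG_ON(phys_to_virt(pa) != buf);	/* round-trips for lowmem */
		kfree(buf);
	}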

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);
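A hedged usage sketch (editorial; the MMIO address and register offset are made up - a real driver takes them from a PCI BAR or a struct resource):

	void __iomem *regs = ioremap(0xfebc0000, 0x1000);	/* illustrative address */

	if (regs) {
		u32 status = readl(regs + 0x10);	/* use the mmio accessors,   */
		writel(status | 1, regs + 0x10);	/* never plain pointer loads */
		iounmap(regs);
	}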

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);


#ifdef CONFIG_X86_32
# include "io_32.h"
#else

@@ -91,7 +181,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				unsigned long prot_val);
extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time

@@ -105,5 +195,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

#define IO_SPACE_LIMIT 0xffff

#endif /* _ASM_X86_IO_H */

@@ -37,8 +37,6 @@
 *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define IO_SPACE_LIMIT 0xffff

#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */

@@ -53,92 +51,6 @@
 */
#define xlate_dev_kmem_ptr(p)	p

/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	The returned physical address is the physical (CPU) mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses directly mapped or allocated via kmalloc.
 *
 *	This function does not give bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */

static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	The returned virtual address is a current CPU mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses that have a kernel mapping
 *
 *	This function does not handle bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function
 */

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{

|
|||
__OUTS(w)
|
||||
__OUTS(l)
|
||||
|
||||
#define IO_SPACE_LIMIT 0xffff
|
||||
|
||||
#if defined(__KERNEL__) && defined(__x86_64__)
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#ifndef __i386__
|
||||
/*
|
||||
* Change virtual addresses to physical addresses and vv.
|
||||
* These are pretty trivial
|
||||
*/
|
||||
static inline unsigned long virt_to_phys(volatile void *address)
|
||||
{
|
||||
return __pa(address);
|
||||
}
|
||||
|
||||
static inline void *phys_to_virt(unsigned long address)
|
||||
{
|
||||
return __va(address);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Change "struct page" to physical address.
|
||||
*/
|
||||
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
||||
|
||||
#include <asm-generic/iomap.h>
|
||||
|
||||
/*
|
||||
* This one maps high address device memory and turns off caching for that area.
|
||||
* it's useful if some control registers are in such an area and write combining
|
||||
* or read caching is not desirable:
|
||||
*/
|
||||
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
|
||||
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
|
||||
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
|
||||
unsigned long prot_val);
|
||||
|
||||
/*
|
||||
* The default ioremap() behavior is non-cached:
|
||||
*/
|
||||
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
|
||||
{
|
||||
return ioremap_nocache(offset, size);
|
||||
}
|
||||
|
||||
extern void iounmap(volatile void __iomem *addr);
|
||||
|
||||
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
|
||||
|
||||
/*
|
||||
* ISA I/O bus memory addresses are 1:1 with the physical address.
|
||||
*/
|
||||
#define isa_virt_to_bus virt_to_phys
|
||||
#define isa_page_to_bus page_to_phys
|
||||
#define isa_bus_to_virt phys_to_virt
|
||||
|
||||
/*
|
||||
* However PCI ones are not necessarily 1:1 and therefore these interfaces
|
||||
* are forbidden in portable PCI drivers.
|
||||
*
|
||||
* Allow them on x86 for legacy drivers, though.
|
||||
*/
|
||||
#define virt_to_bus virt_to_phys
|
||||
#define bus_to_virt phys_to_virt
|
||||
|
||||
void __memcpy_fromio(void *, unsigned long, unsigned);
|
||||
void __memcpy_toio(unsigned long, const void *, unsigned);
|
||||
|
||||
|
|
|
@ -143,15 +143,6 @@ extern int noioapicreroute;
|
|||
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
|
||||
extern int timer_through_8259;
|
||||
|
||||
static inline void disable_ioapic_setup(void)
|
||||
{
|
||||
#ifdef CONFIG_PCI
|
||||
noioapicquirk = 1;
|
||||
noioapicreroute = -1;
|
||||
#endif
|
||||
skip_ioapic_setup = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we use the IO-APIC for IRQ routing, disable automatic
|
||||
* assignment of PCI IRQ's.
|
||||
|
@ -178,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
|
|||
|
||||
extern void probe_nr_irqs_gsi(void);
|
||||
|
||||
extern int setup_ioapic_entry(int apic, int irq,
|
||||
struct IO_APIC_route_entry *entry,
|
||||
unsigned int destination, int trigger,
|
||||
int polarity, int vector);
|
||||
extern void ioapic_write_entry(int apic, int pin,
|
||||
struct IO_APIC_route_entry e);
|
||||
#else /* !CONFIG_X86_IO_APIC */
|
||||
#define io_apic_assign_pci_irqs 0
|
||||
static const int timer_through_8259 = 0;
|
||||
|
|
|
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H

#ifdef CONFIG_X86_LOCAL_APIC

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2

@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
		cpu_relax();
}

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
				       unsigned int dest)
static inline void
__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround

@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
					 unsigned int dest)
static inline void
__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
	native_apic_mem_write(APIC_ICR, cfg);
}

static inline void send_IPI_mask_sequence(const struct cpumask *mask,
					  int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
						int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						  int vector);
#include <asm/genapic.h>

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
				      vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						   int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						     int vector);

/* Avoid include hell */
#define NMI_VECTOR 0x02

extern int no_broadcast;

static inline void __default_local_send_IPI_allbutself(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	else
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}

static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
					    int vector)
static inline void __default_local_send_IPI_all(int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		if (query_cpu != this_cpu)
			__send_IPI_dest_field(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
	if (no_broadcast || vector == NMI_VECTOR)
		apic->send_IPI_mask(cpu_online_mask, vector);
	else
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}

#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
					  int vector);
extern void default_send_IPI_allbutself(int vector);
extern void default_send_IPI_all(int vector);
extern void default_send_IPI_self(int vector);
#endif

#endif

#endif /* _ASM_X86_IPI_H */

@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif

extern unsigned int do_IRQ(struct pt_regs *regs);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);

extern unsigned int do_IRQ(struct pt_regs *regs);

/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);

@@ -1,47 +1,69 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H

#include <linux/threads.h>
/*
 * Linux IRQ vector layout.
 *
 * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
 * be defined by Linux. They are used as a jump table by the CPU when a
 * given vector is triggered - by a CPU-external, CPU-internal or
 * software-triggered event.
 *
 * Linux sets the kernel code address each entry jumps to early during
 * bootup, and never changes them. This is the general layout of the
 * IDT entries:
 *
 *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
 *  Vectors  32 ... 127 : device interrupts
 *  Vector  128         : legacy int80 syscall interface
 *  Vectors 129 ... 237 : device interrupts
 *  Vectors 238 ... 255 : special interrupts
 *
 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
 *
 * This file enumerates the exact layout of them:
 */
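A worked reading of the layout above (editorial, values taken from the defines that follow): device vectors start at FIRST_EXTERNAL_VECTOR (0x20) and the ISA block begins 0x10 higher, so:

	/* IRQ0_VECTOR = FIRST_EXTERNAL_VECTOR + 0x10 = 0x20 + 0x10 = 0x30 */
	/* ISA IRQ 14 (primary IDE) therefore arrives on IDT vector 0x3e   */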

#define NMI_VECTOR		0x02
#define NMI_VECTOR			0x02

/*
 * IDT vectors usable for external interrupt sources start
 * at 0x20:
 */
#define FIRST_EXTERNAL_VECTOR	0x20
#define FIRST_EXTERNAL_VECTOR		0x20

#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR		0x80
# define SYSCALL_VECTOR			0x80
#else
# define IA32_SYSCALL_VECTOR	0x80
# define IA32_SYSCALL_VECTOR		0x80
#endif

/*
 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
 * cleanup after irq migration.
 */
#define IRQ_MOVE_CLEANUP_VECTOR	FIRST_EXTERNAL_VECTOR
#define IRQ_MOVE_CLEANUP_VECTOR		FIRST_EXTERNAL_VECTOR

/*
 * Vectors 0x30-0x3f are used for ISA interrupts.
 */
#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)
#define IRQ1_VECTOR		(IRQ0_VECTOR + 1)
#define IRQ2_VECTOR		(IRQ0_VECTOR + 2)
#define IRQ3_VECTOR		(IRQ0_VECTOR + 3)
#define IRQ4_VECTOR		(IRQ0_VECTOR + 4)
#define IRQ5_VECTOR		(IRQ0_VECTOR + 5)
#define IRQ6_VECTOR		(IRQ0_VECTOR + 6)
#define IRQ7_VECTOR		(IRQ0_VECTOR + 7)
#define IRQ8_VECTOR		(IRQ0_VECTOR + 8)
#define IRQ9_VECTOR		(IRQ0_VECTOR + 9)
#define IRQ10_VECTOR		(IRQ0_VECTOR + 10)
#define IRQ11_VECTOR		(IRQ0_VECTOR + 11)
#define IRQ12_VECTOR		(IRQ0_VECTOR + 12)
#define IRQ13_VECTOR		(IRQ0_VECTOR + 13)
#define IRQ14_VECTOR		(IRQ0_VECTOR + 14)
#define IRQ15_VECTOR		(IRQ0_VECTOR + 15)
#define IRQ0_VECTOR			(FIRST_EXTERNAL_VECTOR + 0x10)

#define IRQ1_VECTOR			(IRQ0_VECTOR +  1)
#define IRQ2_VECTOR			(IRQ0_VECTOR +  2)
#define IRQ3_VECTOR			(IRQ0_VECTOR +  3)
#define IRQ4_VECTOR			(IRQ0_VECTOR +  4)
#define IRQ5_VECTOR			(IRQ0_VECTOR +  5)
#define IRQ6_VECTOR			(IRQ0_VECTOR +  6)
#define IRQ7_VECTOR			(IRQ0_VECTOR +  7)
#define IRQ8_VECTOR			(IRQ0_VECTOR +  8)
#define IRQ9_VECTOR			(IRQ0_VECTOR +  9)
#define IRQ10_VECTOR			(IRQ0_VECTOR + 10)
#define IRQ11_VECTOR			(IRQ0_VECTOR + 11)
#define IRQ12_VECTOR			(IRQ0_VECTOR + 12)
#define IRQ13_VECTOR			(IRQ0_VECTOR + 13)
#define IRQ14_VECTOR			(IRQ0_VECTOR + 14)
#define IRQ15_VECTOR			(IRQ0_VECTOR + 15)

/*
 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff

@@ -50,43 +72,44 @@
 *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
 *  TLB, reschedule and local APIC vectors are performance-critical.
 */

#define SPURIOUS_APIC_VECTOR	0xff
/*
 * Sanity check
 */
#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
# error SPURIOUS_APIC_VECTOR definition error
#endif

#define ERROR_APIC_VECTOR	0xfe
#define RESCHEDULE_VECTOR	0xfd
#define CALL_FUNCTION_VECTOR	0xfc
#define CALL_FUNCTION_SINGLE_VECTOR	0xfb
#define THERMAL_APIC_VECTOR	0xfa

#ifdef CONFIG_X86_32

# define SPURIOUS_APIC_VECTOR		0xff
# define ERROR_APIC_VECTOR		0xfe
# define RESCHEDULE_VECTOR		0xfd
# define CALL_FUNCTION_VECTOR		0xfc
# define CALL_FUNCTION_SINGLE_VECTOR	0xfb
# define THERMAL_APIC_VECTOR		0xfa
/* 0xf8 - 0xf9 : free */
# define INVALIDATE_TLB_VECTOR_END	0xf7
# define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */

# define NUM_INVALIDATE_TLB_VECTORS	8

#else

# define SPURIOUS_APIC_VECTOR		0xff
# define ERROR_APIC_VECTOR		0xfe
# define RESCHEDULE_VECTOR		0xfd
# define CALL_FUNCTION_VECTOR		0xfc
# define CALL_FUNCTION_SINGLE_VECTOR	0xfb
# define THERMAL_APIC_VECTOR		0xfa
# define THRESHOLD_APIC_VECTOR		0xf9
# define UV_BAU_MESSAGE			0xf8
# define INVALIDATE_TLB_VECTOR_END	0xf7
# define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */

#define NUM_INVALIDATE_TLB_VECTORS	8

#endif

/* f0-f7 used for spreading out TLB flushes: */
#define INVALIDATE_TLB_VECTOR_END	0xf7
#define INVALIDATE_TLB_VECTOR_START	0xf0
#define NUM_INVALIDATE_TLB_VECTORS	8

/*
 * Local APIC timer IRQ vector is on a different priority level,
 * to work around the 'lost local interrupt if more than 2 IRQ
 * sources per level' errata.
 */
#define LOCAL_TIMER_VECTOR	0xef
#define LOCAL_TIMER_VECTOR		0xef

/*
 * Performance monitoring interrupt vector:
 */
#define LOCAL_PERF_VECTOR	0xee

/*
 * Performance monitoring interrupt vector:

@@ -98,80 +121,53 @@
 * start at 0x31(0x41) to spread out vectors evenly between priority
 * levels. (0x80 is the syscall vector)
 */
#define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
#define FIRST_DEVICE_VECTOR		(IRQ15_VECTOR + 2)

#define NR_VECTORS		256
#define NR_VECTORS			256

#define FPU_IRQ			13
#define FPU_IRQ				13

#define FIRST_VM86_IRQ		3
#define LAST_VM86_IRQ		15
#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
#define FIRST_VM86_IRQ			3
#define LAST_VM86_IRQ			15

#define NR_IRQS_LEGACY		16
#ifndef __ASSEMBLY__
static inline int invalid_vm86_irq(int irq)
{
	return irq < 3 || irq > 15;
}
#endif

#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
/*
 * Size the maximum number of interrupts.
 *
 * If the irq_desc[] array has a sparse layout, we can size things
 * generously - it scales up linearly with the maximum number of CPUs,
 * and the maximum number of IO-APICs, whichever is higher.
 *
 * In other cases we size more conservatively, to not create too large
 * static arrays.
 */

#include <asm/apicnum.h>	/* need MAX_IO_APICS */
#define NR_IRQS_LEGACY			16

#ifndef CONFIG_SPARSE_IRQ
# if NR_CPUS < MAX_IO_APICS
#  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
#define CPU_VECTOR_LIMIT		(  8 * NR_CPUS      )
#define IO_APIC_VECTOR_LIMIT		( 32 * MAX_IO_APICS )

#ifdef CONFIG_X86_IO_APIC
# ifdef CONFIG_SPARSE_IRQ
#  define NR_IRQS					\
	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
# else
#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
#  if NR_CPUS < MAX_IO_APICS
#   define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
#  else
#   define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
#  endif
# endif
#else

# define NR_IRQS					\
	((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?		\
		(NR_VECTORS + (8 * NR_CPUS)) :		\
		(NR_VECTORS + (32 * MAX_IO_APICS)))	\

#else /* !CONFIG_X86_IO_APIC: */
# define NR_IRQS			NR_IRQS_LEGACY
#endif
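Worked numbers for the sizing above (editorial; NR_CPUS=64 and MAX_IO_APICS=128 are assumed values, not defaults from this tree):

	CPU_VECTOR_LIMIT     =  8 *  64 =  512
	IO_APIC_VECTOR_LIMIT = 32 * 128 = 4096
	sparse IRQ:  NR_IRQS = 256 + 4096    = 4352
	non-sparse:  NR_IRQS = 256 + 4 * 512 = 2304   (NR_CPUS < MAX_IO_APICS)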
||||
|
||||
#elif defined(CONFIG_X86_VOYAGER)
|
||||
|
||||
# define NR_IRQS 224
|
||||
|
||||
#else /* IO_APIC || VOYAGER */
|
||||
|
||||
# define NR_IRQS 16
|
||||
|
||||
#endif
|
||||
|
||||
/* Voyager specific defines */
|
||||
/* These define the CPIs we use in linux */
|
||||
#define VIC_CPI_LEVEL0 0
|
||||
#define VIC_CPI_LEVEL1 1
|
||||
/* now the fake CPIs */
|
||||
#define VIC_TIMER_CPI 2
|
||||
#define VIC_INVALIDATE_CPI 3
|
||||
#define VIC_RESCHEDULE_CPI 4
|
||||
#define VIC_ENABLE_IRQ_CPI 5
|
||||
#define VIC_CALL_FUNCTION_CPI 6
|
||||
#define VIC_CALL_FUNCTION_SINGLE_CPI 7
|
||||
|
||||
/* Now the QIC CPIs: Since we don't need the two initial levels,
|
||||
* these are 2 less than the VIC CPIs */
|
||||
#define QIC_CPI_OFFSET 1
|
||||
#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
|
||||
#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
|
||||
#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
|
||||
#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
|
||||
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
|
||||
#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
|
||||
|
||||
#define VIC_START_FAKE_CPI VIC_TIMER_CPI
|
||||
#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
|
||||
|
||||
/* this is the SYS_INT CPI. */
|
||||
#define VIC_SYS_INT 8
|
||||
#define VIC_CMN_INT 15
|
||||
|
||||
/* This is the boot CPI for alternate processors. It gets overwritten
|
||||
* by the above once the system has activated all available processors */
|
||||
#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
|
||||
#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
|
||||
|
||||
|
||||
#endif /* _ASM_X86_IRQ_VECTORS_H */
|
||||
|
|
|
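For reference, the sparse-IRQ branch of the sizing above can be sanity-checked in isolation. A minimal standalone sketch, with illustrative values for NR_CPUS and MAX_IO_APICS (the real values come from Kconfig and <asm/apicnum.h>, so this is not kernel code):

#include <stdio.h>

/* Illustrative values only. */
#define NR_CPUS		 64
#define MAX_IO_APICS	128
#define NR_VECTORS	256

#define CPU_VECTOR_LIMIT	(8 * NR_CPUS)
#define IO_APIC_VECTOR_LIMIT	(32 * MAX_IO_APICS)

int main(void)
{
	/* Sparse layout: scale with the larger of the two limits. */
	int nr_irqs = NR_VECTORS +
		(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?
			CPU_VECTOR_LIMIT : IO_APIC_VECTOR_LIMIT);

	printf("NR_IRQS = %d\n", nr_irqs);	/* 256 + 4096 = 4352 */
	return 0;
}
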
@@ -9,23 +9,8 @@
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define VA_PGD			3
# define PA_PUD_0		4
# define VA_PUD_0		5
# define PA_PMD_0		6
# define VA_PMD_0		7
# define PA_PTE_0		8
# define VA_PTE_0		9
# define PA_PUD_1		10
# define VA_PUD_1		11
# define PA_PMD_1		12
# define VA_PMD_1		13
# define PA_PTE_1		14
# define VA_PTE_1		15
# define PA_TABLE_PAGE		16
# define PAGES_NR		17
# define PA_TABLE_PAGE		1
# define PAGES_NR		2
#endif

#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
		unsigned long start_address) ATTRIB_NORET;
#endif

#ifdef CONFIG_X86_32
#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif

#endif /* __ASSEMBLY__ */
@@ -1,168 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H

#ifdef CONFIG_X86_LOCAL_APIC

#include <mach_apicdef.h>
#include <asm/smp.h>

#define APIC_DFR_VALUE	(APIC_DFR_FLAT)

static inline const struct cpumask *target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

#define NO_BALANCE_IRQ	(0)
#define esr_disable	(0)

#ifdef CONFIG_X86_64
#include <asm/genapic.h>
#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
#define INT_DEST_MODE (genapic->int_dest_mode)
#define TARGET_CPUS	  (genapic->target_cpus())
#define apic_id_registered (genapic->apic_id_registered)
#define init_apic_ldr (genapic->init_apic_ldr)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
#define phys_pkg_id	(genapic->phys_pkg_id)
#define vector_allocation_domain (genapic->vector_allocation_domain)
#define read_apic_id()  (GET_APIC_ID(apic_read(APIC_ID)))
#define send_IPI_self (genapic->send_IPI_self)
#define wakeup_secondary_cpu (genapic->wakeup_cpu)
extern void setup_apic_routing(void);
#else
#define INT_DELIVERY_MODE dest_LowestPrio
#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
#define TARGET_CPUS (target_cpus())
#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void init_apic_ldr(void)
{
	unsigned long val;

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
	apic_write(APIC_LDR, val);
}

static inline int apic_id_registered(void)
{
	return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	return cpumask_bits(cpumask)[0];
}

static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	unsigned long mask1 = cpumask_bits(cpumask)[0];
	unsigned long mask2 = cpumask_bits(andmask)[0];
	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];

	return (unsigned int)(mask1 & mask2 & mask3);
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

static inline void setup_apic_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
					"Flat", nr_ioapics);
#endif
}

static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}

static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
}
#endif

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return physid_isset(apicid, bitmap);
}

static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	return phys_map;
}

static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}

/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
	return 1 << cpu;
}

static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
		return BAD_APICID;
}

static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}

static inline void setup_portio_remap(void)
{
}

static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}

static inline void enable_apic_mode(void)
{
}
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
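In flat logical mode each CPU's logical APIC ID is a single bit (cpu_to_logical_apicid() above returns 1 << cpu), so a destination covering several CPUs is just the OR of their bits. A standalone sketch of that arithmetic, not kernel code:

#include <stdio.h>

/* Mirrors cpu_to_logical_apicid() in flat logical mode. */
static int cpu_to_logical_apicid(int cpu)
{
	return 1 << cpu;
}

int main(void)
{
	/* Destination covering CPUs 0 and 2: bit 0 | bit 2 = 0x5. */
	unsigned int dest = cpu_to_logical_apicid(0) |
			    cpu_to_logical_apicid(2);

	printf("logical destination = %#x\n", dest);	/* 0x5 */
	return 0;
}
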
@@ -1,24 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H

#include <asm/apic.h>

#ifdef CONFIG_X86_64
#define	APIC_ID_MASK		(genapic->apic_id_mask)
#define GET_APIC_ID(x)		(genapic->get_apic_id(x))
#define	SET_APIC_ID(x)		(genapic->set_apic_id(x))
#else
#define		APIC_ID_MASK		(0xF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
	if (APIC_XAPIC(ver))
		return (((x)>>24)&0xFF);
	else
		return (((x)>>24)&0xF);
}

#define		GET_APIC_ID(x)	get_apic_id(x)
#endif

#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
@@ -1,64 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H

/* Avoid include hell */
#define NMI_VECTOR 0x02

void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
void __send_IPI_shortcut(unsigned int shortcut, int vector);

extern int no_broadcast;

#ifdef CONFIG_X86_64
#include <asm/genapic.h>
#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
#else
static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
	send_IPI_mask_bitmask(mask, vector);
}
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
#endif

static inline void __local_send_IPI_allbutself(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		send_IPI_mask_allbutself(cpu_online_mask, vector);
	else
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

static inline void __local_send_IPI_all(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		send_IPI_mask(cpu_online_mask, vector);
	else
		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

#ifdef CONFIG_X86_64
#define send_IPI_allbutself (genapic->send_IPI_allbutself)
#define send_IPI_all (genapic->send_IPI_all)
#else
static inline void send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (!(num_online_cpus() > 1))
		return;

	__local_send_IPI_allbutself(vector);
	return;
}

static inline void send_IPI_all(int vector)
{
	__local_send_IPI_all(vector);
}
#endif

#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
@@ -1,17 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H

static inline int
mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
{
	return 0;
}

/* Hook from generic ACPI tables.c */
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return 0;
}


#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H

#define MAX_IRQ_SOURCES 256

#if CONFIG_BASE_SMALL == 0
#define MAX_MP_BUSSES 256
#else
#define MAX_MP_BUSSES 32
#endif

#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
@@ -1,41 +0,0 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H

#define TRAMPOLINE_PHYS_LOW (0x467)
#define TRAMPOLINE_PHYS_HIGH (0x469)

static inline void wait_for_init_deassert(atomic_t *deassert)
{
	while (!atomic_read(deassert))
		cpu_relax();
	return;
}

/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}

static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}

static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}

#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */

static inline void inquire_remote_apic(int apicid)
{
	if (apic_verbosity >= APIC_DEBUG)
		__inquire_remote_apic(apicid);
}

#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
@@ -1,15 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
#define _ASM_X86_MACH_GENERIC_GPIO_H

int gpio_request(unsigned gpio, const char *label);
void gpio_free(unsigned gpio);
int gpio_direction_input(unsigned gpio);
int gpio_direction_output(unsigned gpio, int value);
int gpio_get_value(unsigned gpio);
void gpio_set_value(unsigned gpio, int value);
int gpio_to_irq(unsigned gpio);
int irq_to_gpio(unsigned irq);

#include <asm-generic/gpio.h>           /* cansleep wrappers */

#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
@@ -1,35 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
#define _ASM_X86_MACH_GENERIC_MACH_APIC_H

#include <asm/genapic.h>

#define esr_disable (genapic->ESR_DISABLE)
#define NO_BALANCE_IRQ (genapic->no_balance_irq)
#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
#define INT_DEST_MODE (genapic->int_dest_mode)
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
#define TARGET_CPUS	  (genapic->target_cpus())
#define apic_id_registered (genapic->apic_id_registered)
#define init_apic_ldr (genapic->init_apic_ldr)
#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
#define setup_apic_routing (genapic->setup_apic_routing)
#define multi_timer_check (genapic->multi_timer_check)
#define apicid_to_node (genapic->apicid_to_node)
#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
#define setup_portio_remap (genapic->setup_portio_remap)
#define check_apicid_present (genapic->check_apicid_present)
#define check_phys_apicid_present (genapic->check_phys_apicid_present)
#define check_apicid_used (genapic->check_apicid_used)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
#define vector_allocation_domain (genapic->vector_allocation_domain)
#define enable_apic_mode (genapic->enable_apic_mode)
#define phys_pkg_id (genapic->phys_pkg_id)
#define wakeup_secondary_cpu (genapic->wakeup_cpu)

extern void generic_bigsmp_probe(void);

#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
@@ -1,11 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H

#ifndef APIC_DEFINITION
#include <asm/genapic.h>

#define GET_APIC_ID (genapic->get_apic_id)
#define APIC_ID_MASK (genapic->apic_id_mask)
#endif

#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
@@ -1,10 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
#define _ASM_X86_MACH_GENERIC_MACH_IPI_H

#include <asm/genapic.h>

#define send_IPI_mask (genapic->send_IPI_mask)
#define send_IPI_allbutself (genapic->send_IPI_allbutself)
#define send_IPI_all (genapic->send_IPI_all)

#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
@@ -1,9 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H


extern int mps_oem_check(struct mpc_table *, char *, char *);

extern int acpi_madt_oem_check(char *, char *);

#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H

#define MAX_IRQ_SOURCES 256

/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
#define MAX_MP_BUSSES 260

extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);

#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
@@ -1,12 +0,0 @@
#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H

#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
#define wait_for_init_deassert (genapic->wait_for_init_deassert)
#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
#define store_NMI_vector (genapic->store_NMI_vector)
#define restore_NMI_vector (genapic->restore_NMI_vector)
#define inquire_remote_apic (genapic->inquire_remote_apic)

#endif /* _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H */
@@ -1,60 +0,0 @@
#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
#define _ASM_X86_MACH_RDC321X_GPIO_H

#include <linux/kernel.h>

extern int rdc_gpio_get_value(unsigned gpio);
extern void rdc_gpio_set_value(unsigned gpio, int value);
extern int rdc_gpio_direction_input(unsigned gpio);
extern int rdc_gpio_direction_output(unsigned gpio, int value);
extern int rdc_gpio_request(unsigned gpio, const char *label);
extern void rdc_gpio_free(unsigned gpio);
extern void __init rdc321x_gpio_setup(void);

/* Wrappers for the arch-neutral GPIO API */

static inline int gpio_request(unsigned gpio, const char *label)
{
	return rdc_gpio_request(gpio, label);
}

static inline void gpio_free(unsigned gpio)
{
	might_sleep();
	rdc_gpio_free(gpio);
}

static inline int gpio_direction_input(unsigned gpio)
{
	return rdc_gpio_direction_input(gpio);
}

static inline int gpio_direction_output(unsigned gpio, int value)
{
	return rdc_gpio_direction_output(gpio, value);
}

static inline int gpio_get_value(unsigned gpio)
{
	return rdc_gpio_get_value(gpio);
}

static inline void gpio_set_value(unsigned gpio, int value)
{
	rdc_gpio_set_value(gpio, value);
}

static inline int gpio_to_irq(unsigned gpio)
{
	return gpio;
}

static inline int irq_to_gpio(unsigned irq)
{
	return irq;
}

/* For cansleep */
#include <asm-generic/gpio.h>

#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
@@ -79,7 +79,7 @@ do {						\
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
extern int pic_mode;

#ifdef CONFIG_X86_32
#include <mach_mpspec.h>

/*
 * Summit or generic (i.e. installer) kernels need lots of bus entries.
 * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
 */
#if CONFIG_BASE_SMALL == 0
# define MAX_MP_BUSSES		260
#else
# define MAX_MP_BUSSES		32
#endif

#define MAX_IRQ_SOURCES		256

extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];

@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif

#define MAX_APICID		256

#else
#else /* CONFIG_X86_64: */

#define MAX_MP_BUSSES		256
/* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
#define MAX_IRQ_SOURCES		(MAX_MP_BUSSES * 4)

#endif
#endif /* CONFIG_X86_64 */

extern void early_find_smp_config(void);
extern void early_get_smp_config(void);

@@ -45,11 +56,13 @@ extern int smp_found_config;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;

extern void find_smp_config(void);
extern void get_smp_config(void);

#ifdef CONFIG_X86_MPPARSE
extern void find_smp_config(void);
extern void early_reserve_e820_mpc_new(void);
#else
static inline void find_smp_config(void) { }
static inline void early_reserve_e820_mpc_new(void) { }
#endif

@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
#ifdef CONFIG_X86_IO_APIC
extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
				u32 gsi, int triggering, int polarity);
extern int mp_find_ioapic(int gsi);
extern int mp_find_ioapic_pin(int ioapic, int gsi);
#else
static inline int
mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)

extern physid_mask_t phys_cpu_present_map;

extern int generic_mps_oem_check(struct mpc_table *, char *, char *);

extern int default_acpi_madt_oem_check(char *, char *);

extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);

#endif /* _ASM_X86_MPSPEC_H */
@@ -31,6 +31,8 @@
extern int found_numaq;
extern int get_memcfg_numaq(void);

extern void *xquad_portio;

/*
 * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
 */
@@ -1,142 +0,0 @@
#ifndef __ASM_NUMAQ_APIC_H
#define __ASM_NUMAQ_APIC_H

#include <asm/io.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>

#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)

static inline const cpumask_t *target_cpus(void)
{
	return &CPU_MASK_ALL;
}

#define NO_BALANCE_IRQ (1)
#define esr_disable (1)

#define INT_DELIVERY_MODE dest_LowestPrio
#define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return physid_isset(apicid, bitmap);
}
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
#define apicid_cluster(apicid) (apicid & 0xF0)

static inline int apic_id_registered(void)
{
	return 1;
}

static inline void init_apic_ldr(void)
{
	/* Already done in NUMA-Q firmware */
}

static inline void setup_apic_routing(void)
{
	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
		"NUMA-Q", nr_ioapics);
}

/*
 * Skip adding the timer int on secondary nodes, which causes
 * a small but painful rift in the time-space continuum.
 */
static inline int multi_timer_check(int apic, int irq)
{
	return apic != 0 && irq == 0;
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* We don't have a good way to do this yet - hack */
	return physids_promote(0xFUL);
}

/* Mapping from cpu number to logical apicid */
extern u8 cpu_2_logical_apicid[];
static inline int cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
}

/*
 * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
 * cpu to APIC ID relation to properly interact with the intelligent
 * mode of the cluster controller.
 */
static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < 60)
		return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
	else
		return BAD_APICID;
}

static inline int apicid_to_node(int logical_apicid)
{
	return logical_apicid >> 4;
}

static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
{
	int node = apicid_to_node(logical_apicid);
	int cpu = __ffs(logical_apicid & 0xf);

	return physid_mask_of_physid(cpu + 4*node);
}

extern void *xquad_portio;

static inline void setup_portio_remap(void)
{
	int num_quads = num_online_nodes();

	if (num_quads <= 1)
		return;

	printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
	xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
	printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
		(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
}

static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return (1);
}

static inline void enable_apic_mode(void)
{
}

/*
 * We use physical apicids here, not logical, so just return the default
 * physical broadcast to stop people from breaking us
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	return (int) 0xF;
}

static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	return (int) 0xF;
}

/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

#endif /* __ASM_NUMAQ_APIC_H */
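The NUMA-Q mapping in cpu_present_to_apicid() packs the quad number into the high nibble of the APIC ID and a one-hot local-CPU bit (four CPUs per quad) into the low nibble. A standalone sketch of the arithmetic, using an illustrative CPU number:

#include <stdio.h>

/* Mirrors the NUMA-Q quad mapping above. */
static int cpu_present_to_apicid(int mps_cpu)
{
	return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}

int main(void)
{
	/* CPU 5 sits in quad 1 as local CPU 1: APIC ID 0x12. */
	int apicid = cpu_present_to_apicid(5);

	/* apicid >> 4 recovers the node, as apicid_to_node() does. */
	printf("apicid = %#x, node = %d\n", apicid, apicid >> 4);
	return 0;
}
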
@@ -1,14 +0,0 @@
#ifndef __ASM_NUMAQ_APICDEF_H
#define __ASM_NUMAQ_APICDEF_H


#define APIC_ID_MASK (0xF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
	return (((x)>>24)&0x0F);
}

#define		GET_APIC_ID(x)	get_apic_id(x)

#endif
@@ -1,22 +0,0 @@
#ifndef __ASM_NUMAQ_IPI_H
#define __ASM_NUMAQ_IPI_H

void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
	send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
	send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static inline void send_IPI_all(int vector)
{
	send_IPI_mask(cpu_online_mask, vector);
}

#endif /* __ASM_NUMAQ_IPI_H */
@@ -1,6 +0,0 @@
#ifndef __ASM_NUMAQ_MPPARSE_H
#define __ASM_NUMAQ_MPPARSE_H

extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);

#endif /* __ASM_NUMAQ_MPPARSE_H */
@@ -1,45 +0,0 @@
#ifndef __ASM_NUMAQ_WAKECPU_H
#define __ASM_NUMAQ_WAKECPU_H

/* This file copes with machines that wakeup secondary CPUs by NMIs */

#define TRAMPOLINE_PHYS_LOW (0x8)
#define TRAMPOLINE_PHYS_HIGH (0xa)

/* We don't do anything here because we use NMI's to boot instead */
static inline void wait_for_init_deassert(atomic_t *deassert)
{
}

/*
 * Because we use NMIs rather than the INIT-STARTUP sequence to
 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
 */
static inline void smp_callin_clear_local_apic(void)
{
	clear_local_APIC();
}

static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
	printk("Storing NMI vector\n");
	*high =
	  *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
	*low =
	  *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
}

static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
	printk("Restoring NMI vector\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
								 *high;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
								 *low;
}

static inline void inquire_remote_apic(int apicid)
{
}

#endif /* __ASM_NUMAQ_WAKECPU_H */
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;

extern int page_is_ram(unsigned long pagenr);
extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
		       pgprot_t vma_prot);

@@ -95,6 +94,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;

@@ -117,6 +121,11 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif	/* PAGETABLE_LEVELS == 4 */

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +137,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

#else  /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>

@@ -137,6 +147,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
}
#endif	/* PAGETABLE_LEVELS >= 3 */

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
@@ -147,7 +162,7 @@ static inline pteval_t native_pte_val(pte_t pte)
	return pte.pte;
}

static inline pteval_t native_pte_flags(pte_t pte)
static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

@@ -173,7 +188,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
#endif

#define pte_val(x)	native_pte_val(x)
#define pte_flags(x)	native_pte_flags(x)
#define __pte(x)	native_make_pte(x)

#endif	/* CONFIG_PARAVIRT */
@@ -12,21 +12,38 @@
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_64
#define CLBR_RSI  (1 << 3)
#define CLBR_RDI  (1 << 4)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 3) - 1)
#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
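As a worked example of the mask arithmetic above (a standalone sketch, not part of the patch): on 32-bit, CLBR_CALLEE_SAVE reduces to just CLBR_ECX, because EAX and EDX form the return-value pair and drop out.

#include <stdio.h>

#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)

#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG  (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH  (0)
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

int main(void)
{
	/* 0b0111 & ~0b0101 == 0b0010 -> only ECX must be preserved. */
	printf("CLBR_CALLEE_SAVE = %#x\n", CLBR_CALLEE_SAVE);
	return 0;
}
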

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

@@ -40,6 +57,14 @@ struct tss_struct;
struct mm_struct;
struct desc_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
	unsigned int kernel_rpl;

@@ -189,11 +214,15 @@ struct pv_irq_ops {
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

@@ -279,12 +308,11 @@ struct pv_mmu_ops {
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	pteval_t (*pte_val)(pte_t);
	pteval_t (*pte_flags)(pte_t);
	pte_t (*make_pte)(pteval_t pte);
	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	pgdval_t (*pgd_val)(pgd_t);
	pgd_t (*make_pgd)(pgdval_t pgd);
	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
@@ -299,12 +327,12 @@ struct pv_mmu_ops {

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	pmdval_t (*pmd_val)(pmd_t);
	pmd_t (*make_pmd)(pmdval_t pmd);
	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
	pudval_t (*pud_val)(pud_t);
	pud_t (*make_pud)(pudval_t pud);
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
@@ -389,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
@@ -480,25 +510,45 @@ int paravirt_disable_iospace(void);
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)

#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif
#endif	/* CONFIG_X86_32 */
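The PVOP_CALL_ARG* constraints simply pin C expressions to the argument registers of each ABI ("a"/"d"/"c" on 32-bit, "D"/"S"/"d"/"c" on 64-bit), so a patched-in direct call sees a normal C call frame. A hypothetical standalone sketch of the same constraint mechanism (demo_add is not kernel code; x86 only):

/* Pins a to the "ARG1" register and b to the "ARG2" register the way
 * PVOP_CALL_ARG1/ARG2 do on 32-bit x86 ("a" -> eax, "d" -> edx). */
static inline unsigned long demo_add(unsigned long a, unsigned long b)
{
	unsigned long ret;

	asm("lea (%1,%2), %0"
	    : "=r" (ret)
	    : "a" (a), "d" (b));
	return ret;
}
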

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
@@ -506,10 +556,11 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
@@ -517,70 +568,113 @@ int paravirt_disable_iospace(void);
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : PVOP_VCALL_CLOBBERS			\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(CLBR_ANY),		\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" VEXTRA_CLOBBERS);		\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_VCALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)



#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)))
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)))
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
@@ -588,13 +682,13 @@ int paravirt_disable_iospace(void);
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),	\
		    "3"((unsigned long)(arg4)))
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),	\
		     "3"((unsigned long)(arg4)))
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

static inline int paravirt_enabled(void)
@@ -1061,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t,
				 pv_mmu_ops.make_pte,
				 val, (u64)val >> 32);
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pteval_t,
				 pv_mmu_ops.make_pte,
				 val);
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}
@@ -1077,42 +1171,25 @@ static inline pteval_t pte_val(pte_t pte)
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte, (u64)pte.pte >> 32);
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte);
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte);

#ifdef CONFIG_PARAVIRT_DEBUG
	BUG_ON(ret & PTE_PFN_MASK);
#endif
	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
				 val, (u64)val >> 32);
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
				 val);
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}
@@ -1122,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd, (u64)pgd.pgd >> 32);
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd);
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}
@@ -1190,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
				 val, (u64)val >> 32);
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
				 val);
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}
@@ -1204,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd, (u64)pmd.pmd >> 32);
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd);
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}
@@ -1230,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
				 val, (u64)val >> 32);
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
				 val);
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}
@@ -1244,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud, (u64)pud.pud >> 32);
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud);
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}
@@ -1354,14 +1431,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}

void arch_flush_lazy_cpu_mode(void);

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
@@ -1374,13 +1444,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}
void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
@@ -1389,9 +1453,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

void paravirt_use_bytelocks(void);
#define paravirt_nop	((void *)_paravirt_nop)

#ifdef CONFIG_SMP

@@ -1441,12 +1506,37 @@ extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1456,52 +1546,76 @@ extern struct paravirt_patch_site __parainstructions[],
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
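Usage follows a fixed pattern; a minimal sketch with a hypothetical backend function, my_save_fl (real backends wrap their save_fl/restore_fl implementations the same way):

/* Hypothetical backend: an ordinary C function. */
static unsigned long my_save_fl(void)
{
	return 0;	/* pretend no flags are set */
}
/* Emits __raw_callee_save_my_save_fl, which preserves all
 * caller-save registers (except the return value) around the call. */
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

/* The ops structure then stores the thunk, not the bare function:
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * while a backend whose function already obeys the convention would
 * use __PV_IS_CALLEE_SAVE() instead.
 */
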

static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
		     : "memory", "cc");
	return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
		     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
		     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
		     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
@@ -1544,33 +1658,49 @@ static inline unsigned long __raw_local_irq_save(void)
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
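COND_PUSH/COND_POP turn the clobber set into assembler-time conditionals: a register is pushed only when its CLBR_* bit is absent from `set`, i.e. only when the call site has not declared it clobberable. The predicate can be checked in a standalone sketch (the CLBR_* values mirror the 64-bit definitions above):

#include <stdio.h>

#define CLBR_RAX (1 << 0)
#define CLBR_RCX (1 << 1)
#define CLBR_RDI (1 << 3)

int main(void)
{
	unsigned int set = CLBR_RAX | CLBR_RDI;

	/* Same predicate as the .if in COND_PUSH: nonzero means "push". */
	printf("push rax? %d\n", !!((~set) & CLBR_RAX));	/* 0 */
	printf("push rcx? %d\n", !!((~set) & CLBR_RCX));	/* 1 */
	printf("push rdi? %d\n", !!((~set) & CLBR_RDI));	/* 0 */
	return 0;
}
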
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define PV_SAVE_REGS \
|
||||
push %rax; \
|
||||
push %rcx; \
|
||||
push %rdx; \
|
||||
push %rsi; \
|
||||
push %rdi; \
|
||||
push %r8; \
|
||||
push %r9; \
|
||||
push %r10; \
|
||||
push %r11
|
||||
#define PV_RESTORE_REGS \
|
||||
pop %r11; \
|
||||
pop %r10; \
|
||||
pop %r9; \
|
||||
pop %r8; \
|
||||
pop %rdi; \
|
||||
pop %rsi; \
|
||||
pop %rdx; \
|
||||
pop %rcx; \
|
||||
pop %rax
|
||||
|
||||
#define PV_SAVE_REGS(set) \
|
||||
COND_PUSH(set, CLBR_RAX, rax); \
|
||||
COND_PUSH(set, CLBR_RCX, rcx); \
|
||||
COND_PUSH(set, CLBR_RDX, rdx); \
|
||||
COND_PUSH(set, CLBR_RSI, rsi); \
|
||||
COND_PUSH(set, CLBR_RDI, rdi); \
|
||||
COND_PUSH(set, CLBR_R8, r8); \
|
||||
COND_PUSH(set, CLBR_R9, r9); \
|
||||
COND_PUSH(set, CLBR_R10, r10); \
|
||||
COND_PUSH(set, CLBR_R11, r11)
|
||||
#define PV_RESTORE_REGS(set) \
|
||||
COND_POP(set, CLBR_R11, r11); \
|
||||
COND_POP(set, CLBR_R10, r10); \
|
||||
COND_POP(set, CLBR_R9, r9); \
|
||||
COND_POP(set, CLBR_R8, r8); \
|
||||
COND_POP(set, CLBR_RDI, rdi); \
|
||||
COND_POP(set, CLBR_RSI, rsi); \
|
||||
COND_POP(set, CLBR_RDX, rdx); \
|
||||
COND_POP(set, CLBR_RCX, rcx); \
|
||||
COND_POP(set, CLBR_RAX, rax)
|
||||
|
||||
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
|
||||
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
|
||||
#define PARA_INDIRECT(addr) *addr(%rip)
|
||||
#else
|
||||
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
|
||||
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
|
||||
#define PV_SAVE_REGS(set) \
|
||||
COND_PUSH(set, CLBR_EAX, eax); \
|
||||
COND_PUSH(set, CLBR_EDI, edi); \
|
||||
COND_PUSH(set, CLBR_ECX, ecx); \
|
||||
COND_PUSH(set, CLBR_EDX, edx)
|
||||
#define PV_RESTORE_REGS(set) \
|
||||
COND_POP(set, CLBR_EDX, edx); \
|
||||
COND_POP(set, CLBR_ECX, ecx); \
|
||||
COND_POP(set, CLBR_EDI, edi); \
|
||||
COND_POP(set, CLBR_EAX, eax)
|
||||
|
||||
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
|
||||
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
|
||||
#define PARA_INDIRECT(addr) *%cs:addr
|
||||
|
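The COND_PUSH/COND_POP pair above is the heart of the clobber optimization: a register is saved around the indirect call only when it is not already in the call site's clobber set, because anything the caller already expects to be clobbered needs no preservation. A minimal stand-alone sketch of the same test in C (the CLBR_* values here are illustrative, not the kernel's):

#include <stdio.h>

/* Illustrative clobber bits; the kernel's CLBR_* values differ. */
enum { CLBR_AX = 1 << 0, CLBR_CX = 1 << 1, CLBR_DX = 1 << 2 };

/* Mirrors ".if ((~(set)) & mask); push %reg; .endif":
 * save the register only when it is absent from the clobber set. */
static int needs_save(unsigned set, unsigned mask)
{
        return (~set & mask) != 0;
}

int main(void)
{
        unsigned set = CLBR_AX;         /* call site may clobber only AX */
        printf("save ax? %d\n", needs_save(set, CLBR_AX));      /* 0 */
        printf("save cx? %d\n", needs_save(set, CLBR_CX));      /* 1 */
        return 0;
}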
@@ -1582,15 +1712,15 @@ static inline unsigned long __raw_local_irq_save(void)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS;)
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS;)
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
@@ -1620,11 +1750,15 @@ static inline unsigned long __raw_local_irq_save(void)
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  PV_SAVE_REGS;                                         \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
                  PV_RESTORE_REGS                                       \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                        \

@@ -5,10 +5,8 @@

#ifdef CONFIG_X86_PAT
extern int pat_enabled;
extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
static const int pat_enabled;
static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif

extern void pat_init(void);

@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
                           unsigned long req_type, unsigned long *ret_type);
extern int free_memtype(u64 start, u64 end);

extern void pat_disable(char *reason);

#endif /* _ASM_X86_PAT_H */

@@ -34,6 +34,12 @@
#define PER_CPU_VAR(var)       per_cpu__##var
#endif /* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>

@@ -45,6 +51,22 @@
#define __percpu_arg(x)         "%" #x
#endif

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  per_cpu_var(var)
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define pte_none(x)             (!(x).pte_low)

/*
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
 * split up the 29 bits of offset into this range:
@@ -18,21 +18,6 @@
        printk("%s:%d: bad pgd %p(%016Lx).\n",                          \
               __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
        return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
        write_cr3(pgd);
}

#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))


/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
                                  pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

static inline int pte_none(pte_t pte)
{
        return !pte.pte_low && !pte.pte_high;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>

#define FIRST_USER_ADDRESS      0

#define _PAGE_BIT_PRESENT       0       /* is present */

@@ -236,68 +238,82 @@ static inline unsigned long pte_pfn(pte_t pte)

static inline int pmd_large(pmd_t pte)
{
        return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_NX);
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_PSE);
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_PSE);
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_GLOBAL);
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;
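The rewrite above funnels every pte_mk*()/pte_clr*() helper through pte_set_flags()/pte_clear_flags(), so the pteval conversion happens in exactly one place instead of being open-coded per helper. A stand-alone sketch of the pattern (simplified types and flag values, not the kernel's):

#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define _PAGE_RW    (1ULL << 1)
#define _PAGE_DIRTY (1ULL << 6)

/* Single choke point for setting/clearing flag bits; every
 * pte_mk*()/pte_clr*() variant becomes a one-liner on top of these. */
static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        return (pte_t){ pte.pte | set };
}

static pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        return (pte_t){ pte.pte & ~clear };
}

static pte_t pte_mkdirty(pte_t pte)   { return pte_set_flags(pte, _PAGE_DIRTY); }
static pte_t pte_wrprotect(pte_t pte) { return pte_clear_flags(pte, _PAGE_RW); }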
@@ -451,6 +467,190 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS == 2
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)   pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
        return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)   pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
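All of the *_index() helpers consolidated above decompose a virtual address the same way: shift down to that level's granularity, then mask to the table size. A worked example with typical x86_64 4KB/4-level constants (assumed for illustration, not taken from this diff):

#include <stdio.h>

/* Typical x86_64 4KB/4-level values; assumed for illustration. */
#define PAGE_SHIFT   12
#define PMD_SHIFT    21
#define PUD_SHIFT    30
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PUD 512

int main(void)
{
        unsigned long addr = 0x7f1234567000UL;

        /* Each level consumes 9 bits of the address above the page offset. */
        printf("pte_index = %lu\n", (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
        printf("pmd_index = %lu\n", (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
        printf("pud_index = %lu\n", (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1));
        return 0;
}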
@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)  ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
#define pmd_none(x)     (!(unsigned long)pmd_val((x)))
#define pmd_present(x)  (pmd_val((x)) & _PAGE_PRESENT)
#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))


static inline int pud_large(pud_t pud) { return 0; }

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address)                              \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address)                                      \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)                         \
        ((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))

#define pmd_page(pmd)   (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd)                                     \
        ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)                                    \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +          \
@@ -176,7 +133,4 @@ do {                                            \
#define kern_addr_valid(kaddr)  (0)
#endif

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* _ASM_X86_PGTABLE_32_H */

@@ -66,9 +66,6 @@ extern void paging_init(void);
        printk("%s:%d: bad pgd %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)     (!pgd_val(x))
#define pud_none(x)     (!pud_val(x))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

@@ -133,8 +130,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
        native_set_pgd(pgd, native_make_pgd(0));
}

#define pte_same(a, b)          ((a).pte == (b).pte)

#endif /* !__ASSEMBLY__ */

#define PMD_SIZE        (_AC(1, UL) << PMD_SHIFT)

@@ -155,26 +150,6 @@ static inline void native_pgd_clear(pgd_t *pgd)

#ifndef __ASSEMBLY__

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pud_bad(pud_t pud)
{
        return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

#define pte_none(x)     (!pte_val((x)))
#define pte_present(x)  (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pages_to_mb(x)  ((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.

@@ -183,41 +158,12 @@ static inline int pmd_bad(pmd_t pmd)
/*
 * Level 4 access.
 */
#define pgd_page_vaddr(pgd)                                             \
        ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
#define pgd_page(pgd)           (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud)                                             \
        ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)   (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, address)                                        \
        ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)

static inline int pud_large(pud_t pte)
{
        return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

/* PMD  - Level 2 access */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
#define pmd_page(pmd)           (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
                                  pmd_index(address))
#define pmd_none(x)     (!pmd_val((x)))
#define pmd_present(x)  (pmd_val((x)) & _PAGE_PRESENT)
#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |     \
                                             _PAGE_FILE })

@@ -225,13 +171,6 @@ static inline int pud_large(pud_t pte)

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn((page)), (pgprot))

#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
                                         pte_index((address)))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))

@@ -265,9 +204,6 @@ extern int direct_gbpages;
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

@@ -6,8 +6,4 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004

#ifdef CONFIG_X86_64
extern long sys_arch_prctl(int, unsigned long);
#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_PRCTL_H */

@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
        char                    pad0;
#else
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
        int                     x86_tlbsize;
        int                     x86_tlbsize;
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
#endif

@@ -393,16 +393,14 @@ union irq_stack_union {
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_INIT_PER_CPU(irq_stack_union);

static inline void load_gs_base(int cpu)
{
        /* Memory clobbers used to order pda/percpu accesses */
        mb();
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
        mb();
}
DECLARE_PER_CPU(char *, irq_stack_ptr);
#else   /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
#endif  /* X86_64 */

extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;

@@ -776,9 +774,9 @@ extern int sysenter_setup(void);
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void init_gdt(int cpu);

static inline unsigned long get_debugctlmsr(void)
{

@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);

extern void check_efer(void);

#ifdef CONFIG_X86_BIOS_REBOOT
extern int reboot_force;
#else
static const int reboot_force = 0;
#endif

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);

@@ -28,7 +28,7 @@ struct pt_regs {
        int  xds;
        int  xes;
        int  xfs;
        /* int  gs; */
        int  xgs;
        long orig_eax;
        long eip;
        int  xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
        unsigned long ds;
        unsigned long es;
        unsigned long fs;
        /* int gs; */
        unsigned long gs;
        unsigned long orig_ax;
        unsigned long ip;
        unsigned long cs;

@@ -61,7 +61,7 @@
 *
 *  26 - ESPFIX small SS
 *  27 - per-cpu                        [ offset to per-cpu data area ]
 *  28 - unused
 *  28 - stack_canary-20                [ for stack protector ]
 *  29 - unused
 *  30 - unused
 *  31 - TSS for double fault handler
@@ -95,6 +95,13 @@
#define __KERNEL_PERCPU 0
#endif

#define GDT_ENTRY_STACK_CANARY          (GDT_ENTRY_KERNEL_BASE + 16)
#ifdef CONFIG_CC_STACKPROTECTOR
#define __KERNEL_STACK_CANARY           (GDT_ENTRY_STACK_CANARY * 8)
#else
#define __KERNEL_STACK_CANARY           0
#endif

#define GDT_ENTRY_DOUBLEFAULT_TSS       31

/*
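__KERNEL_STACK_CANARY above is a segment selector, which is why it is the GDT entry number times 8: the low three selector bits carry the table indicator and RPL, so the descriptor index starts at bit 3. A quick check of that arithmetic:

#include <assert.h>

int main(void)
{
        /* GDT_ENTRY_STACK_CANARY is entry 28 per the GDT comment above. */
        unsigned gdt_entry_stack_canary = 28;
        unsigned selector = gdt_entry_stack_canary * 8;

        /* index lives in bits 3..15; TI and RPL (bits 0..2) are zero here */
        assert((selector >> 3) == gdt_entry_stack_canary);
        assert((selector & 7) == 0);
        return 0;
}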
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H

#ifdef __KERNEL__

#define COMMAND_LINE_SIZE 2048

#ifndef __ASSEMBLY__

@@ -8,10 +10,8 @@
/* Interrupt control for vSMPowered x86_64 systems */
void vsmp_init(void);


void setup_bios_corruption_check(void);


#ifdef CONFIG_X86_VISWS
extern void visws_early_detect(void);
extern int is_visws_box(void);

@@ -43,7 +43,7 @@ struct x86_quirks {
        void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
        void (*mpc_oem_pci_bus)(struct mpc_bus *m);
        void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
                                unsigned short oemsize);
                                 unsigned short oemsize);
        int (*setup_ioapic_ids)(void);
        int (*update_genapic)(void);
};

@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
#endif
#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__

#ifdef __i386__

#include <linux/pfn.h>

@@ -182,28 +182,9 @@ static inline int logical_smp_processor_id(void)
        return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}

#include <mach_apicdef.h>
static inline unsigned int read_apic_id(void)
{
        unsigned int reg;

        reg = *(u32 *)(APIC_BASE + APIC_ID);

        return GET_APIC_ID(reg);
}
#endif


# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
#include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return read_apic_id();
}
# endif /* APIC_DEFINITION */

#else /* CONFIG_X86_LOCAL_APIC */

@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        pr_debug("1.\n");
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
        *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
                                                                 start_eip >> 4;
        pr_debug("2.\n");
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
        *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
                                                         start_eip & 0xf;
        pr_debug("3.\n");
}

@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         */
        CMOS_WRITE(0, 0xf);

        *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
        *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}

static inline void __init smpboot_setup_io_apic(void)
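The warm-reset vector written by smpboot_setup_warm_reset_vector() is a real-mode segment:offset pair, which is why the code stores start_eip >> 4 as the segment and start_eip & 0xf as the offset: real mode resolves seg:off as seg * 16 + off. A small sanity check of that split (example address assumed):

#include <assert.h>

int main(void)
{
        unsigned long start_eip = 0x9a000;      /* example sub-1MB trampoline address */
        unsigned short seg = start_eip >> 4;    /* 0x9a00 */
        unsigned short off = start_eip & 0xf;   /* 0x0 */

        /* Real mode resolves seg:off as seg * 16 + off. */
        assert((unsigned long)seg * 16 + off == start_eip);
        return 0;
}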
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
        return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifdef CONFIG_PARAVIRT
/*
 * Define virtualization-friendly old-style lock byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
        s8 lock;
        s8 spinners;
};
#ifndef CONFIG_PARAVIRT

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        s8 val = 1;

        asm("1: xchgb %1, %0\n"
            "   test %1,%1\n"
            "   jz 3f\n"
            "   " LOCK_PREFIX "incb %2\n"
            "2: rep;nop\n"
            "   cmpb $1, %0\n"
            "   je 2b\n"
            "   " LOCK_PREFIX "decb %2\n"
            "   jmp 1b\n"
            "3:"
            : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %1,%0"
            : "+m" (bl->lock), "+q" (old) : : "memory");

        return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        smp_wmb();
        bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);

@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
        __raw_spin_lock(lock);
}

#endif /* CONFIG_PARAVIRT */
#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{

@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        atomic_dec(count);
        if (atomic_read(count) >= 0)
        if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
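The __raw_read_trylock() fix above matters because atomic_dec() followed by atomic_read() is two separate operations: another CPU can modify the count in between, so the reader may decide based on a value its own decrement never produced. atomic_dec_return() makes the decrement and the observed result a single atomic step. A sketch of the corrected shape in C11 atomics (a stand-in for the kernel's atomic_t API):

#include <stdatomic.h>

/* Read-trylock over a counter, in the style of the fixed code:
 * decrement and test as one atomic operation. */
static int read_trylock(atomic_int *count)
{
        /* fetch_sub returns the OLD value, so ">= 1" here corresponds
         * to the kernel's "atomic_dec_return(count) >= 0". */
        if (atomic_fetch_sub(count, 1) >= 1)
                return 1;
        atomic_fetch_add(count, 1);     /* undo: lock is write-held */
        return 0;
}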
@@ -1,8 +1,54 @@
/*
 * GCC stack protector support.
 *
 * Stack protector works by putting a predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function.  The pattern is called the stack canary
 * and unfortunately gcc requires it to be at a fixed offset from %gs.
 * On x86_64, the offset is 40 bytes and on x86_32 20 bytes.  x86_64
 * and x86_32 use segment registers differently and thus handle this
 * requirement differently.
 *
 * On x86_64, %gs is shared by percpu area and stack canary.  All
 * percpu symbols are zero based and %gs points to the base of percpu
 * area.  The first occupant of the percpu area is always
 * irq_stack_union which contains stack_canary at offset 40.  Userland
 * %gs is always saved and restored on kernel entry and exit using
 * swapgs, so stack protector doesn't add any complexity there.
 *
 * On x86_32, it's slightly more complicated.  As in x86_64, %gs is
 * used for userland TLS.  Unfortunately, some processors are much
 * slower at loading segment registers with a different value when
 * entering and leaving the kernel, so the kernel uses %fs for percpu
 * area and manages %gs lazily so that %gs is switched only when
 * necessary, usually during task switch.
 *
 * As gcc requires the stack canary at %gs:20, %gs can't be managed
 * lazily if stack protector is enabled, so the kernel saves and
 * restores userland %gs on kernel entry and exit.  This behavior is
 * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
 * system.h to hide the details.
 */

#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1

#ifdef CONFIG_CC_STACKPROTECTOR

#include <asm/tsc.h>
#include <asm/processor.h>
#include <asm/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <linux/random.h>

/*
 * 24 byte read-only segment initializer for stack canary.  Linker
 * can't handle the address bit shifting.  Address will be set in
 * head_32 for boot CPU and setup_per_cpu_areas() for others.
 */
#define GDT_STACK_CANARY_INIT                                           \
        [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },

/*
 * Initialize the stackprotector canary value.
@@ -15,12 +61,9 @@ static __always_inline void boot_init_stack_canary(void)
        u64 canary;
        u64 tsc;

        /*
         * Build time only check to make sure the stack_canary is at
         * offset 40 in the pda; this is a gcc ABI requirement
         */
#ifdef CONFIG_X86_64
        BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
#endif
        /*
         * We both use the random pool and the current TSC as a source
         * of randomness. The TSC only matters for very early init,
@@ -32,7 +75,50 @@ static __always_inline void boot_init_stack_canary(void)
        canary += tsc + (tsc << 32UL);

        current->stack_canary = canary;
#ifdef CONFIG_X86_64
        percpu_write(irq_stack_union.stack_canary, canary);
#else
        percpu_write(stack_canary, canary);
#endif
}

static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
        unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
        struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
        struct desc_struct desc;

        desc = gdt_table[GDT_ENTRY_STACK_CANARY];
        desc.base0 = canary & 0xffff;
        desc.base1 = (canary >> 16) & 0xff;
        desc.base2 = (canary >> 24) & 0xff;
        write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
#endif
}

#else   /* CC_STACKPROTECTOR */

#define GDT_STACK_CANARY_INIT

/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */

static inline void setup_stack_canary_segment(int cpu)
{ }

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm volatile ("mov %0, %%gs" : : "r" (0));
#endif
}

#endif  /* CC_STACKPROTECTOR */
#endif  /* _ASM_STACKPROTECTOR_H */
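setup_stack_canary_segment() above scatters the 32-bit canary address across the descriptor's base0/base1/base2 fields, a leftover of the historical x86 descriptor layout (16 + 8 + 8 base bits). A quick check that the three fields reassemble to the original address (example value assumed):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t canary = 0xc12345ec;   /* example per-cpu canary address */

        uint16_t base0 = canary & 0xffff;        /* bits  0..15 */
        uint8_t  base1 = (canary >> 16) & 0xff;  /* bits 16..23 */
        uint8_t  base2 = (canary >> 24) & 0xff;  /* bits 24..31 */

        assert(((uint32_t)base2 << 24 | (uint32_t)base1 << 16 | base0)
               == canary);
        return 0;
}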
@@ -1,202 +0,0 @@
#ifndef __ASM_SUMMIT_APIC_H
#define __ASM_SUMMIT_APIC_H

#include <asm/smp.h>
#include <linux/gfp.h>

#define esr_disable (1)
#define NO_BALANCE_IRQ (0)

/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT   4
#define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

#define APIC_DFR_VALUE  (APIC_DFR_CLUSTER)

static inline const cpumask_t *target_cpus(void)
{
        /* CPU_MASK_ALL (0xff) has undefined behaviour with
         * dest_LowestPrio mode logical clustered apic interrupt routing
         * Just start on cpu 0.  IRQ balancing will spread load
         */
        return &cpumask_of_cpu(0);
}

#define INT_DELIVERY_MODE (dest_LowestPrio)
#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
        return 0;
}

/* we don't use the phys_cpu_present_map to indicate apicid presence */
static inline unsigned long check_apicid_present(int bit)
{
        return 1;
}

#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)

extern u8 cpu_2_logical_apicid[];

static inline void init_apic_ldr(void)
{
        unsigned long val, id;
        int count = 0;
        u8 my_id = (u8)hard_smp_processor_id();
        u8 my_cluster = (u8)apicid_cluster(my_id);
#ifdef CONFIG_SMP
        u8 lid;
        int i;

        /* Create logical APIC IDs by counting CPUs already in cluster. */
        for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
                lid = cpu_2_logical_apicid[i];
                if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
                        ++count;
        }
#endif
        /* We only have a 4 wide bitmap in cluster mode.  If a deranged
         * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
        BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
        id = my_cluster | (1UL << count);
        apic_write(APIC_DFR, APIC_DFR_VALUE);
        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        val |= SET_APIC_LOGICAL_ID(id);
        apic_write(APIC_LDR, val);
}

static inline int multi_timer_check(int apic, int irq)
{
        return 0;
}

static inline int apic_id_registered(void)
{
        return 1;
}

static inline void setup_apic_routing(void)
{
        printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
                                                nr_ioapics);
}

static inline int apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
        return apicid_2_node[hard_smp_processor_id()];
#else
        return 0;
#endif
}

/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
        if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return (int)cpu_2_logical_apicid[cpu];
#else
        return logical_smp_processor_id();
#endif
}

static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids)
                return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
{
        /* For clustered we don't have a good way to do this yet - hack */
        return physids_promote(0x0F);
}

static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
        return physid_mask_of_physid(0);
}

static inline void setup_portio_remap(void)
{
}

static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
        return 1;
}

static inline void enable_apic_mode(void)
{
}

static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid;

        num_bits_set = cpus_weight(*cpumask);
        /* Return id to all */
        if (num_bits_set >= nr_cpu_ids)
                return (int) 0xFF;
        /*
         * The cpus in the mask must all be on the apic cluster.  If they are
         * not on the same apicid cluster return default value of TARGET_CPUS.
         */
        cpu = first_cpu(*cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        while (cpus_found < num_bits_set) {
                if (cpu_isset(cpu, *cpumask)) {
                        int new_apicid = cpu_to_logical_apicid(cpu);
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
                                return 0xFF;
                        }
                        apicid = apicid | new_apicid;
                        cpus_found++;
                }
                cpu++;
        }
        return apicid;
}

static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
                                                  const struct cpumask *andmask)
{
        int apicid = cpu_to_logical_apicid(0);
        cpumask_var_t cpumask;

        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
                return apicid;

        cpumask_and(cpumask, inmask, andmask);
        cpumask_and(cpumask, cpumask, cpu_online_mask);
        apicid = cpu_mask_to_apicid(cpumask);

        free_cpumask_var(cpumask);
        return apicid;
}

/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
        return hard_smp_processor_id() >> index_msb;
}

#endif /* __ASM_SUMMIT_APIC_H */

@@ -1,13 +0,0 @@
#ifndef __ASM_SUMMIT_APICDEF_H
#define __ASM_SUMMIT_APICDEF_H

#define         APIC_ID_MASK            (0xFF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
        return (x>>24)&0xFF;
}

#define         GET_APIC_ID(x)  get_apic_id(x)

#endif

@@ -1,26 +0,0 @@
#ifndef __ASM_SUMMIT_IPI_H
#define __ASM_SUMMIT_IPI_H

void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);

static inline void send_IPI_mask(const cpumask_t *mask, int vector)
{
        send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);

        if (!cpus_empty(mask))
                send_IPI_mask(&mask, vector);
}

static inline void send_IPI_all(int vector)
{
        send_IPI_mask(&cpu_online_map, vector);
}

#endif /* __ASM_SUMMIT_IPI_H */

@@ -1,109 +0,0 @@
#ifndef __ASM_SUMMIT_MPPARSE_H
#define __ASM_SUMMIT_MPPARSE_H

#include <asm/tsc.h>

extern int use_cyclone;

#ifdef CONFIG_X86_SUMMIT_NUMA
extern void setup_summit(void);
#else
#define setup_summit()  {}
#endif

static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
                char *productid)
{
        if (!strncmp(oem, "IBM ENSW", 8) &&
            (!strncmp(productid, "VIGIL SMP", 9)
             || !strncmp(productid, "EXA", 3)
             || !strncmp(productid, "RUTHLESS SMP", 12))){
                mark_tsc_unstable("Summit based system");
                use_cyclone = 1; /*enable cyclone-timer*/
                setup_summit();
                return 1;
        }
        return 0;
}

/* Hook from generic ACPI tables.c */
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        if (!strncmp(oem_id, "IBM", 3) &&
            (!strncmp(oem_table_id, "SERVIGIL", 8)
             || !strncmp(oem_table_id, "EXA", 3))){
                mark_tsc_unstable("Summit based system");
                use_cyclone = 1; /*enable cyclone-timer*/
                setup_summit();
                return 1;
        }
        return 0;
}

struct rio_table_hdr {
        unsigned char version;      /* Version number of this data structure  */
                                    /* Version 3 adds chassis_num & WP_index  */
        unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)  */
        unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)  */
} __attribute__((packed));

struct scal_detail {
        unsigned char node_id;      /* Scalability Node ID */
        unsigned long CBAR;         /* Address of 1MB register space */
        unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
        unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
        unsigned char port1node;    /* Node ID port connected to: 0xFF = None */
        unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
        unsigned char port2node;    /* Node ID port connected to: 0xFF = None */
        unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
        unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node) */
} __attribute__((packed));

struct rio_detail {
        unsigned char node_id;      /* RIO Node ID */
        unsigned long BBAR;         /* Address of 1MB register space */
        unsigned char type;         /* Type of device */
        unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
                                    /* For CYC:  Node ID of Twister that owns this CYC */
        unsigned char port0node;    /* Node ID port connected to: 0xFF=None */
        unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
        unsigned char port1node;    /* Node ID port connected to: 0xFF=None */
        unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
        unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG */
                                    /* For CYC:  0 */
        unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used */
                                    /*                 = 0 : the XAPIC is not used, ie:*/
                                    /*                     ints fwded to another XAPIC */
                                    /*           Bits1:7 Reserved */
                                    /* For CYC:  Bits0:7 Reserved */
        unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
                                    /*           lower slot numbers/PCI bus numbers */
                                    /* For CYC:  No meaning */
        unsigned char chassis_num;  /* 1 based Chassis number */
                                    /* For LookOut WPEGs this field indicates the     */
                                    /* Expansion Chassis #, enumerated from Boot      */
                                    /* Node WPEG external port, then Boot Node CYC    */
                                    /* external port, then Next Vigil chassis WPEG    */
                                    /* external port, etc.                            */
                                    /* Shared Lookouts have only 1 chassis number (the */
                                    /* first one assigned)                             */
} __attribute__((packed));


typedef enum {
        CompatTwister = 0,  /* Compatibility Twister               */
        AltTwister    = 1,  /* Alternate Twister of internal 8-way */
        CompatCyclone = 2,  /* Compatibility Cyclone               */
        AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
        CompatWPEG    = 4,  /* Compatibility WPEG                  */
        AltWPEG       = 5,  /* Second Planar WPEG                  */
        LookOutAWPEG  = 6,  /* LookOut WPEG                        */
        LookOutBWPEG  = 7,  /* LookOut WPEG                        */
} node_type;

static inline int is_WPEG(struct rio_detail *rio){
        return (rio->type == CompatWPEG || rio->type == AltWPEG ||
                rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
}

#endif /* __ASM_SUMMIT_MPPARSE_H */

@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
#ifdef CONFIG_X86_32
/* kernel/process_32.c */
asmlinkage int sys_fork(struct pt_regs);
asmlinkage int sys_clone(struct pt_regs);
asmlinkage int sys_vfork(struct pt_regs);
asmlinkage int sys_execve(struct pt_regs);
int sys_fork(struct pt_regs *);
int sys_clone(struct pt_regs *);
int sys_vfork(struct pt_regs *);
int sys_execve(struct pt_regs *);

/* kernel/signal_32.c */
asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                             struct old_sigaction __user *);
asmlinkage int sys_sigaltstack(unsigned long);
asmlinkage unsigned long sys_sigreturn(unsigned long);
asmlinkage int sys_rt_sigreturn(unsigned long);
int sys_sigaltstack(struct pt_regs *);
unsigned long sys_sigreturn(struct pt_regs *);
long sys_rt_sigreturn(struct pt_regs *);

/* kernel/ioport.c */
asmlinkage long sys_iopl(unsigned long);
long sys_iopl(struct pt_regs *);

/* kernel/sys_i386_32.c */
asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,

@@ -59,8 +59,8 @@ struct oldold_utsname;
asmlinkage int sys_olduname(struct oldold_utsname __user *);

/* kernel/vm86_32.c */
asmlinkage int sys_vm86old(struct pt_regs);
asmlinkage int sys_vm86(struct pt_regs);
int sys_vm86old(struct pt_regs *);
int sys_vm86(struct pt_regs *);

#else /* CONFIG_X86_32 */

@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
        "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (per_cpu_var(stack_canary))
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +58,7 @@ do {                                                                    \
                     "movl %[next_sp],%%esp\n\t"        /* restore ESP */ \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP */       \
                     "pushl %[next_ip]\n\t"     /* restore EIP */       \
                     __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
@@ -58,6 +73,8 @@ do {                                                                    \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                     /* input parameters: */                            \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
@@ -66,6 +83,8 @@ do {                                                                    \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
} while (0)
@@ -111,16 +130,16 @@ do {                                                                    \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"            \
             __switch_canary                                            \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                     \
             LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"      \
             "movq %%rax,%%rdi\n\t"                                     \
             "jc   ret_from_fork\n\t"                                   \
             "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"               \
             "jnz   ret_from_fork\n\t"                                  \
             RESTORE_CONTEXT                                            \
             : "=a" (last)                                              \
               __switch_canary_oparam                                   \
             : [next] "S" (next), [prev] "D" (prev),                    \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),    \
               [tif_fork] "i" (TIF_FORK),                               \
               [_tif_fork] "i" (_TIF_FORK),                             \
               [thread_info] "i" (offsetof(struct task_struct, stack)), \
               [current_task] "m" (per_cpu_var(current_task))           \
               __switch_canary_iparam                                   \
@@ -182,6 +201,25 @@ extern void native_load_gs_index(unsigned);
#define savesegment(seg, value)                         \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)       (u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)    loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)       ((tsk)->thread.gs)
#define lazy_save_gs(v)         savesegment(gs, (v))
#define lazy_load_gs(v)         loadsegment(gs, (v))
#else   /* X86_32_LAZY_GS */
#define get_user_gs(regs)       (u16)((regs)->gs)
#define set_user_gs(regs, v)    do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)       (task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)         do { } while (0)
#define lazy_load_gs(v)         do { } while (0)
#endif  /* X86_32_LAZY_GS */
#endif  /* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
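The 32-bit __switch_canary step in the context-switch asm above has a simple shape: the incoming task's canary is copied into the per-cpu slot that %gs:20 resolves to, before any stack-protected function of that task can return. A simplified single-CPU sketch of the idea, not the kernel's implementation:

struct task_struct { unsigned long stack_canary; /* ... */ };

static unsigned long stack_canary_slot;  /* stand-in for the per-cpu %gs:20 slot */

/* Done inside the switch path, so the canary check in any function the
 * new task returns from sees that task's own value. */
static void switch_canary(struct task_struct *next)
{
        stack_canary_slot = next->stack_canary;
}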
@@ -40,6 +40,7 @@ struct thread_info {
 */
        __u8                    supervisor_stack[0];
#endif
        int                     uaccess_err;
};

#define INIT_THREAD_INFO(tsk)                                           \

@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
        return &node_to_cpumask_map[node];
}

static inline void setup_node_to_cpumask_map(void) { }

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */

@@ -120,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

extern void setup_node_to_cpumask_map(void);

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"

@@ -218,6 +222,8 @@ static inline int node_to_first_cpu(int node)
        return first_cpu(cpu_online_map);
}

static inline void setup_node_to_cpumask_map(void) { }

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"

@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
dotraplinkage void do_overflow(struct pt_regs *, long);
dotraplinkage void do_bounds(struct pt_regs *, long);
dotraplinkage void do_invalid_op(struct pt_regs *, long);
dotraplinkage void do_device_not_available(struct pt_regs);
dotraplinkage void do_device_not_available(struct pt_regs *, long);
dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
dotraplinkage void do_segment_not_present(struct pt_regs *, long);

Some files were not shown because too many files changed in this diff.