Merge branch 'x86/core' into core/ipi
This commit is contained in:
Commit cd80a8142e

@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h:
#define topology_physical_package_id(cpu)
#define topology_core_id(cpu)
#define topology_thread_siblings(cpu)
#define topology_core_siblings(cpu)
#define topology_thread_cpumask(cpu)
#define topology_core_cpumask(cpu)

The type of **_id is int.
The type of siblings is cpumask_t.
The type of siblings is (const) struct cpumask *.

To be consistent on all architectures, include/linux/topology.h
provides default definitions for any of the above macros that are
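
For reference, the values behind these macros end up exported under sysfs,
so they can be checked from the shell on a running system; the paths below
follow this document, and the output is machine-dependent:

  cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
  cat /sys/devices/system/cpu/cpu0/topology/core_id
  cat /sys/devices/system/cpu/cpu0/topology/thread_siblings
  cat /sys/devices/system/cpu/cpu0/topology/core_siblings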

@@ -1310,8 +1310,13 @@ and is between 256 and 4096 characters. It is defined in the file

	memtest=	[KNL,X86] Enable memtest
			Format: <integer>
			range: 0,4 : pattern number
			default : 0 <disable>
			Specifies the number of memtest passes to be
			performed. Each pass selects another test
			pattern from a given set of patterns. Memtest
			fills the memory with this pattern, validates
			memory contents and reserves bad memory
			regions that are detected.

	meye.*=		[HW] Set MotionEye Camera parameters
			See Documentation/video4linux/meye.txt.
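
As a usage illustration only (the kernel image name and root device below
are placeholders), requesting four memtest passes from a GRUB entry would
look like:

	kernel /vmlinuz root=/dev/sda1 ro memtest=4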

@@ -158,7 +158,7 @@ Offset Proto Name Meaning
0202/4	2.00+	header		Magic signature "HdrS"
0206/2	2.00+	version		Boot protocol version supported
0208/4	2.00+	realmode_swtch	Boot loader hook (see below)
020C/2	2.00+	start_sys	The load-low segment (0x1000) (obsolete)
020C/2	2.00+	start_sys_seg	The load-low segment (0x1000) (obsolete)
020E/2	2.00+	kernel_version	Pointer to kernel version string
0210/1	2.00+	type_of_loader	Boot loader identifier
0211/1	2.00+	loadflags	Boot protocol option flags

@@ -170,10 +170,11 @@ Offset Proto Name Meaning
0224/2	2.01+	heap_end_ptr	Free memory after setup end
0226/2	N/A	pad1		Unused
0228/4	2.02+	cmd_line_ptr	32-bit pointer to the kernel command line
022C/4	2.03+	initrd_addr_max	Highest legal initrd address
022C/4	2.03+	ramdisk_max	Highest legal initrd address
0230/4	2.05+	kernel_alignment	Physical addr alignment required for kernel
0234/1	2.05+	relocatable_kernel	Whether kernel is relocatable or not
0235/3	N/A	pad2		Unused
0235/1	N/A	pad2		Unused
0236/2	N/A	pad3		Unused
0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
023C/4	2.07+	hardware_subarch	Hardware subarchitecture
0240/8	2.07+	hardware_subarch_data	Subarchitecture-specific data

@@ -299,14 +300,14 @@ Protocol: 2.00+
  e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
  10.17.

Field name:	readmode_swtch
Field name:	realmode_swtch
Type:		modify (optional)
Offset/size:	0x208/4
Protocol:	2.00+

  Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)

Field name:	start_sys
Field name:	start_sys_seg
Type:		read
Offset/size:	0x20c/2
Protocol:	2.00+

@@ -468,7 +469,7 @@ Protocol: 2.02+
  zero, the kernel will assume that your boot loader does not support
  the 2.02+ protocol.

Field name:	initrd_addr_max
Field name:	ramdisk_max
Type:		read
Offset/size:	0x22c/4
Protocol:	2.03+

@@ -542,7 +543,10 @@ Protocol: 2.08+

  The payload may be compressed. The format of both the compressed and
  uncompressed data should be determined using the standard magic
  numbers. Currently only gzip compressed ELF is used.
  numbers. The currently supported compression formats are gzip
  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
  (magic number 5D 00). The uncompressed payload is currently always ELF
  (magic number 7F 45 4C 46).

Field name:	payload_length
Type:		read
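
The magic numbers listed above are enough to classify a payload. A minimal,
self-contained sketch of such a check (illustrative only; this is not the
kernel's decompressor code):

	#include <stdio.h>
	#include <stddef.h>

	static const char *payload_format(const unsigned char *p, size_t len)
	{
		if (len >= 4 && p[0] == 0x7f && p[1] == 'E' && p[2] == 'L' && p[3] == 'F')
			return "uncompressed ELF";	/* 7F 45 4C 46 */
		if (len >= 2 && p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
			return "gzip";			/* 1F 8B or 1F 9E */
		if (len >= 2 && p[0] == 0x42 && p[1] == 0x5a)
			return "bzip2";			/* 42 5A */
		if (len >= 2 && p[0] == 0x5d && p[1] == 0x00)
			return "LZMA";			/* 5D 00 */
		return "unknown";
	}

	int main(void)
	{
		const unsigned char gz[] = { 0x1f, 0x8b, 0x08, 0x00 };

		printf("%s\n", payload_format(gz, sizeof(gz)));	/* prints "gzip" */
		return 0;
	}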

@@ -0,0 +1,101 @@

Mini-HOWTO for using the earlyprintk=dbgp boot option with a
USB2 Debug port key and a debug cable, on x86 systems.

You need two computers, the 'USB debug key' special gadget and
two USB cables, connected like this:

  [host/target] <-------> [USB debug key] <-------> [client/console]

1. There are three specific hardware requirements:

 a.) Host/target system needs to have USB debug port capability.

     You can check this capability by looking at a 'Debug port' bit in
     the lspci -vvv output:

     # lspci -vvv
     ...
     00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
             Subsystem: Lenovo ThinkPad T61
             Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
             Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
             Latency: 0
             Interrupt: pin D routed to IRQ 19
             Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
             Capabilities: [50] Power Management version 2
                     Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
                     Status: D0 PME-Enable- DSel=0 DScale=0 PME+
             Capabilities: [58] Debug port: BAR=1 offset=00a0
                          ^^^^^^^^^^^ <==================== [ HERE ]
             Kernel driver in use: ehci_hcd
             Kernel modules: ehci-hcd
     ...

     ( If your system does not list a debug port capability then you probably
       won't be able to use the USB debug key. )

 b.) You also need a Netchip USB debug cable/key:

       http://www.plxtech.com/products/NET2000/NET20DC/default.asp

     This is a small blue plastic connector with two USB connections;
     it draws power from its USB connections.

 c.) Thirdly, you need a second client/console system with a regular USB port.

2. Software requirements:

 a.) On the host/target system:

     You need to enable the following kernel config option:

       CONFIG_EARLY_PRINTK_DBGP=y

     And you need to add the boot command line: "earlyprintk=dbgp".
     (If you are using Grub, append it to the 'kernel' line in
     /etc/grub.conf)

     NOTE: normally the earlyprintk console gets turned off once the
     regular console is alive - use "earlyprintk=dbgp,keep" to keep
     this channel open beyond early bootup. This can be useful for
     debugging crashes under Xorg, etc.

 b.) On the client/console system:

     You should enable the following kernel config option:

       CONFIG_USB_SERIAL_DEBUG=y

     On the next bootup with the modified kernel you should
     get a /dev/ttyUSBx device(s).

     Now this channel of kernel messages is ready to be used: start
     your favorite terminal emulator (minicom, etc.) and set
     it up to use /dev/ttyUSB0 - or use a raw 'cat /dev/ttyUSBx' to
     see the raw output.

 c.) On Nvidia Southbridge based systems: the kernel will try to probe
     and find out which port has the debug device connected.

3. Testing that it works fine:

   You can test the output by using earlyprintk=dbgp,keep and provoking
   kernel messages on the host/target system. You can provoke a harmless
   kernel message by for example doing:

     echo h > /proc/sysrq-trigger

   On the host/target system you should see this help line in "dmesg" output:

     SysRq : HELP : loglevel(0-9) reBoot Crashdump terminate-all-tasks(E) memory-full-oom-kill(F) kill-all-tasks(I) saK show-backtrace-all-active-cpus(L) show-memory-usage(M) nice-all-RT-tasks(N) powerOff show-registers(P) show-all-timers(Q) unRaw Sync show-task-states(T) Unmount show-blocked-tasks(W) dump-ftrace-buffer(Z)

   On the client/console system do:

     cat /dev/ttyUSB0

   And you should see the help line above displayed shortly after you've
   provoked it on the host system.

If it does not work then please ask about it on the linux-kernel@vger.kernel.org
mailing list or contact the x86 maintainers.
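
   As a concrete client-side example (the device name /dev/ttyUSB0 is an
   assumption here; adjust it to whatever device your system created),
   either of these will do:

     minicom -D /dev/ttyUSB0

   or:

     stty -F /dev/ttyUSB0 raw; cat /dev/ttyUSB0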

Makefile

@@ -533,8 +533,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
endif

# Force gcc to behave correct even for buggy distributions
# Arch Makefiles may override this setting
ifndef CONFIG_CC_STACKPROTECTOR
KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
endif

ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
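
For context, cc-option adds a flag only when the compiler accepts it. A rough
shell equivalent of that probe (a sketch assuming $CC is set, not the actual
Kbuild implementation) is:

	if $CC -Werror -fno-stack-protector -S -x c /dev/null -o /dev/null 2>/dev/null; then
		KBUILD_CFLAGS="$KBUILD_CFLAGS -fno-stack-protector"
	fi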

@@ -1,6 +1,8 @@
#ifndef _ALPHA_STATFS_H
#define _ALPHA_STATFS_H

#include <linux/types.h>

/* Alpha is the only 64-bit platform with 32-bit statfs. And doesn't
   even seem to implement statfs64 */
#define __statfs_word __u32

@@ -1,7 +1,7 @@
#ifndef _ALPHA_SWAB_H
#define _ALPHA_SWAB_H

#include <asm/types.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/compiler.h>

@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
	return 0;
}
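
The irq_select_affinity() hunk above shows the conversion pattern this merge
applies across all architectures: the affinity mask is now reached through a
pointer (so it can live off-stack with CONFIG_CPUMASK_OFFSTACK=y), which means
direct struct assignment becomes cpumask_copy(). A standalone toy sketch of
the same idea, with simplified stand-in types rather than the real kernel ones:

	#include <stdio.h>
	#include <string.h>

	#define NR_CPUS 64
	struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))]; };

	/* like the kernel helper: copy one mask's bits into another */
	static void cpumask_copy(struct cpumask *dst, const struct cpumask *src)
	{
		memcpy(dst->bits, src->bits, sizeof(dst->bits));
	}

	int main(void)
	{
		static struct cpumask affinity_storage;		/* what desc->affinity points at */
		struct cpumask *affinity = &affinity_storage;	/* a pointer, not an embedded cpumask_t */
		struct cpumask online = { { 0xf } };		/* pretend CPUs 0-3 are online */

		/* the old style "desc->affinity = online" only worked while affinity
		 * was an embedded struct; through a pointer the bits must be copied: */
		cpumask_copy(affinity, &online);
		printf("affinity bits: %#lx\n", affinity->bits[0]);
		return 0;
	}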

@@ -189,9 +189,21 @@ callback_init(void * kernel_end)

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long vaddr = VMALLOC_START;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries. */
		for (i = 0; i < crb->map_entries; ++i) {

@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
			vaddr += PAGE_SIZE;
		}
	}

		/* Let vmalloc know that we've allocated some space. */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.addr = (void *) VMALLOC_START;
		console_remap_vm.size = vaddr - VMALLOC_START;
		vmlist = &console_remap_vm;
	}

	callback_init_done = 1;
|
@ -2,7 +2,7 @@
|
|||
#define __ARM_A_OUT_H__
|
||||
|
||||
#include <linux/personality.h>
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct exec
|
||||
{
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#ifndef __ASMARM_SETUP_H
|
||||
#define __ASMARM_SETUP_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define COMMAND_LINE_SIZE 1024
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#define __ASM_ARM_SWAB_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
|
||||
# define __SWAB_64_THRU_32__
|
||||
|
|
|
@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
|
|||
.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
/* We are not allocating bad_irq_desc.affinity or .pending_mask */
|
||||
#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
|
||||
#endif
|
||||
|
||||
/*
|
||||
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
|
||||
* come via this function. Instead, they should provide their
|
||||
|
@ -161,7 +166,7 @@ void __init init_IRQ(void)
|
|||
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
bad_irq_desc.affinity = CPU_MASK_ALL;
|
||||
cpumask_setall(bad_irq_desc.affinity);
|
||||
bad_irq_desc.cpu = smp_processor_id();
|
||||
#endif
|
||||
init_arch_irq();
|
||||
|
@ -191,15 +196,16 @@ void migrate_irqs(void)
|
|||
struct irq_desc *desc = irq_desc + i;
|
||||
|
||||
if (desc->cpu == cpu) {
|
||||
unsigned int newcpu = any_online_cpu(desc->affinity);
|
||||
|
||||
if (newcpu == NR_CPUS) {
|
||||
unsigned int newcpu = cpumask_any_and(desc->affinity,
|
||||
cpu_online_mask);
|
||||
if (newcpu >= nr_cpu_ids) {
|
||||
if (printk_ratelimit())
|
||||
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
|
||||
i, cpu);
|
||||
|
||||
cpus_setall(desc->affinity);
|
||||
newcpu = any_online_cpu(desc->affinity);
|
||||
cpumask_setall(desc->affinity);
|
||||
newcpu = cpumask_any_and(desc->affinity,
|
||||
cpu_online_mask);
|
||||
}
|
||||
|
||||
route_irq(desc, i, newcpu);
|
||||
|
|
|
@ -65,6 +65,7 @@ SECTIONS
|
|||
#endif
|
||||
. = ALIGN(4096);
|
||||
__per_cpu_start = .;
|
||||
*(.data.percpu.page_aligned)
|
||||
*(.data.percpu)
|
||||
*(.data.percpu.shared_aligned)
|
||||
__per_cpu_end = .;
|
||||
|
|
|
@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
|
|||
const struct cpumask *mask = cpumask_of(cpu);
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
desc->affinity = *mask;
|
||||
cpumask_copy(desc->affinity, mask);
|
||||
desc->chip->set_affinity(irq, mask);
|
||||
spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
|
|
@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
|
|||
config QUICKLIST
|
||||
def_bool y
|
||||
|
||||
config HAVE_ARCH_BOOTMEM_NODE
|
||||
config HAVE_ARCH_BOOTMEM
|
||||
def_bool n
|
||||
|
||||
config ARCH_HAVE_MEMORY_PRESENT
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#ifndef __ASM_AVR32_SWAB_H
|
||||
#define __ASM_AVR32_SWAB_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#define __SWAB_64_THRU_32__
|
||||
|
|
|
@ -3,14 +3,4 @@
|
|||
|
||||
#include <asm-generic/percpu.h>
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
#define PERCPU_MODULE_RESERVE 8192
|
||||
#else
|
||||
#define PERCPU_MODULE_RESERVE 0
|
||||
#endif
|
||||
|
||||
#define PERCPU_ENOUGH_ROOM \
|
||||
(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
|
||||
PERCPU_MODULE_RESERVE)
|
||||
|
||||
#endif /* __ARCH_BLACKFIN_PERCPU__ */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#ifndef _BLACKFIN_SWAB_H
|
||||
#define _BLACKFIN_SWAB_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
|
||||
|
|
|
@ -70,6 +70,11 @@ static struct irq_desc bad_irq_desc = {
|
|||
#endif
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
/* We are not allocating a variable-sized bad_irq_desc.affinity */
|
||||
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
|
||||
#endif
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
{
|
||||
int i = *(loff_t *) v, j;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#ifndef _H8300_SWAB_H
|
||||
#define _H8300_SWAB_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
|
||||
# define __SWAB_64_THRU_32__
|
||||
|
|
|
@ -6,8 +6,6 @@
|
|||
* David Mosberger-Tang <davidm@hpl.hp.com>
|
||||
*/
|
||||
|
||||
#include <asm/types.h>
|
||||
|
||||
/* floating point status register: */
|
||||
#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
|
||||
#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* define this macro to get some asm stmts included in 'c' files */
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
/* include compiler specific intrinsics */
|
||||
#include <asm/ia64regs.h>
|
||||
#ifdef __INTEL_COMPILER
|
||||
|
|
|
@ -21,8 +21,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <asm/types.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
/* Select x86 specific features in <linux/kvm.h> */
|
||||
|
|
|
@ -27,12 +27,12 @@ extern void *per_cpu_init(void);
|
|||
|
||||
#else /* ! SMP */
|
||||
|
||||
#define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))
|
||||
|
||||
#define per_cpu_init() (__phys_per_cpu_start)
|
||||
|
||||
#endif /* SMP */
|
||||
|
||||
#define PER_CPU_BASE_SECTION ".data.percpu"
|
||||
|
||||
/*
|
||||
* Be extremely careful when taking the address of this variable! Due to virtual
|
||||
* remapping, it is different from the canonical address returned by __get_cpu_var(var)!
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
|
||||
*/
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/intrinsics.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
|
|
|
@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
|
|||
.child = NULL, \
|
||||
.groups = NULL, \
|
||||
.min_interval = 8, \
|
||||
.max_interval = 8*(min(num_online_cpus(), 32)), \
|
||||
.max_interval = 8*(min(num_online_cpus(), 32U)), \
|
||||
.busy_factor = 64, \
|
||||
.imbalance_pct = 125, \
|
||||
.cache_nice_tries = 2, \
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
#ifndef _ASM_IA64_UV_UV_H
|
||||
#define _ASM_IA64_UV_UV_H
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/sn/simulator.h>
|
||||
|
||||
static inline int is_uv_system(void)
|
||||
{
|
||||
/* temporary support for running on hardware simulator */
|
||||
return IS_MEDUSA() || ia64_platform_is("uv");
|
||||
}
|
||||
|
||||
#endif /* _ASM_IA64_UV_UV_H */
|
|
@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
|
|||
return __va(phys_addr);
|
||||
}
|
||||
|
||||
void __init __acpi_unmap_table(char *map, unsigned long size)
|
||||
{
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
Boot-time Table Parsing
|
||||
-------------------------------------------------------------------------- */
|
||||
|
|
|
@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
|
|||
if (iosapic_intr_info[irq].count == 0) {
|
||||
#ifdef CONFIG_SMP
|
||||
/* Clear affinity */
|
||||
cpus_setall(idesc->affinity);
|
||||
cpumask_setall(idesc->affinity);
|
||||
#endif
|
||||
/* Clear the interrupt information */
|
||||
iosapic_intr_info[irq].dest = 0;
|
||||
|
|
|
@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
|
|||
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
|
||||
{
|
||||
if (irq < NR_IRQS) {
|
||||
cpumask_copy(&irq_desc[irq].affinity,
|
||||
cpumask_copy(irq_desc[irq].affinity,
|
||||
cpumask_of(cpu_logical_id(hwid)));
|
||||
irq_redir[irq] = (char) (redir & 0xff);
|
||||
}
|
||||
|
@ -148,7 +148,7 @@ static void migrate_irqs(void)
|
|||
if (desc->status == IRQ_PER_CPU)
|
||||
continue;
|
||||
|
||||
if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
|
||||
if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
|
||||
>= nr_cpu_ids) {
|
||||
/*
|
||||
* Save it for phase 2 processing
|
||||
|
|
|
@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
|
|||
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
|
||||
ia64_srlz_d();
|
||||
while (vector != IA64_SPURIOUS_INT_VECTOR) {
|
||||
struct irq_desc *desc = irq_to_desc(vector);
|
||||
|
||||
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
|
||||
smp_local_flush_tlb();
|
||||
kstat_this_cpu.irqs[vector]++;
|
||||
kstat_incr_irqs_this_cpu(vector, desc);
|
||||
} else if (unlikely(IS_RESCHEDULE(vector)))
|
||||
kstat_this_cpu.irqs[vector]++;
|
||||
kstat_incr_irqs_this_cpu(vector, desc);
|
||||
else {
|
||||
int irq = local_vector_to_irq(vector);
|
||||
|
||||
|
@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
|
|||
* Perform normal interrupt style processing
|
||||
*/
|
||||
while (vector != IA64_SPURIOUS_INT_VECTOR) {
|
||||
struct irq_desc *desc = irq_to_desc(vector);
|
||||
|
||||
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
|
||||
smp_local_flush_tlb();
|
||||
kstat_this_cpu.irqs[vector]++;
|
||||
kstat_incr_irqs_this_cpu(vector, desc);
|
||||
} else if (unlikely(IS_RESCHEDULE(vector)))
|
||||
kstat_this_cpu.irqs[vector]++;
|
||||
kstat_incr_irqs_this_cpu(vector, desc);
|
||||
else {
|
||||
struct pt_regs *old_regs = set_irq_regs(NULL);
|
||||
int irq = local_vector_to_irq(vector);
|
||||
|
|
|
@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
|
|||
msg.data = data;
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
|
||||
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
|
|||
msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
|
||||
|
||||
dmar_msi_write(irq, &msg);
|
||||
irq_desc[irq].affinity = *mask;
|
||||
cpumask_copy(irq_desc[irq].affinity, mask);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
|
|
@ -219,6 +219,7 @@ SECTIONS
|
|||
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
|
||||
{
|
||||
__per_cpu_start = .;
|
||||
*(.data.percpu.page_aligned)
|
||||
*(.data.percpu)
|
||||
*(.data.percpu.shared_aligned)
|
||||
__per_cpu_end = .;
|
||||
|
|
|
@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
|
|||
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
irq_desc[irq].affinity = *cpu_mask;
|
||||
cpumask_copy(irq_desc[irq].affinity, cpu_mask);
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
|
|
@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
|
|||
*/
|
||||
#define IRQ_AFFINITY_HOOK(irq) \
|
||||
do { \
|
||||
if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
|
||||
if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
|
||||
smtc_forward_irq(irq); \
|
||||
irq_exit(); \
|
||||
return; \
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#ifndef _ASM_SIGCONTEXT_H
|
||||
#define _ASM_SIGCONTEXT_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/sgidefs.h>
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI32
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#define _ASM_SWAB_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define __SWAB_64_THRU_32__
|
||||
|
||||
|
|
|
@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
|||
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
|
||||
|
||||
}
|
||||
irq_desc[irq].affinity = *cpumask;
|
||||
cpumask_copy(irq_desc[irq].affinity, cpumask);
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
}
|
||||
|
|
|
@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
|
|||
* and efficiency, we just pick the easiest one to find.
|
||||
*/
|
||||
|
||||
target = first_cpu(irq_desc[irq].affinity);
|
||||
target = cpumask_first(irq_desc[irq].affinity);
|
||||
|
||||
/*
|
||||
* We depend on the platform code to have correctly processed
|
||||
|
@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
|
|||
struct clock_event_device *cd;
|
||||
void *arg_copy = pipi->arg;
|
||||
int type_copy = pipi->type;
|
||||
int irq = MIPS_CPU_IRQ_BASE + 1;
|
||||
|
||||
smtc_ipi_nq(&freeIPIq, pipi);
|
||||
switch (type_copy) {
|
||||
case SMTC_CLOCK_TICK:
|
||||
irq_enter();
|
||||
kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
cd = &per_cpu(mips_clockevent_device, cpu);
|
||||
cd->event_handler(cd);
|
||||
irq_exit();
|
||||
|
|
|
@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
|
|||
|
||||
void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
{
|
||||
cpumask_t tmask = *affinity;
|
||||
cpumask_t tmask;
|
||||
int cpu = 0;
|
||||
void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
|
||||
|
||||
|
@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
|||
* be made to forward to an offline "CPU".
|
||||
*/
|
||||
|
||||
cpumask_copy(&tmask, affinity);
|
||||
for_each_cpu(cpu, affinity) {
|
||||
if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
|
||||
cpu_clear(cpu, tmask);
|
||||
}
|
||||
irq_desc[irq].affinity = tmask;
|
||||
cpumask_copy(irq_desc[irq].affinity, &tmask);
|
||||
|
||||
if (cpus_empty(tmask))
|
||||
/*
|
||||
|
|
|
@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
|
|||
int irq = SGI_BUSERR_IRQ;
|
||||
|
||||
irq_enter();
|
||||
kstat_this_cpu.irqs[irq]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
ip22_be_interrupt(irq);
|
||||
irq_exit();
|
||||
}
|
||||
|
|
|
@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
|
|||
char c;
|
||||
|
||||
irq_enter();
|
||||
kstat_this_cpu.irqs[irq]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
|
||||
ArcRead(0, &c, 1, &cnt);
|
||||
ArcEnterInteractiveMode();
|
||||
|
|
|
@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
|
|||
void bcm1480_mailbox_interrupt(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
int irq = K_BCM1480_INT_MBOX_0_0;
|
||||
unsigned int action;
|
||||
|
||||
kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
/* Load the mailbox register to figure out what we're supposed to do */
|
||||
action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
|
||||
|
||||
|
|
|
@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
|
|||
void sb1250_mailbox_interrupt(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
int irq = K_INT_MBOX_0;
|
||||
unsigned int action;
|
||||
|
||||
kstat_this_cpu.irqs[K_INT_MBOX_0]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
/* Load the mailbox register to figure out what we're supposed to do */
|
||||
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
|
||||
|
||||
|
|
|
@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
|
|||
* the stack NMI-atomically, it's safe to use smp_processor_id().
|
||||
*/
|
||||
int sum, cpu = smp_processor_id();
|
||||
int irq = NMIIRQ;
|
||||
u8 wdt, tmp;
|
||||
|
||||
wdt = WDCTR & ~WDCTR_WDCNE;
|
||||
|
@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
|
|||
NMICR = NMICR_WDIF;
|
||||
|
||||
nmi_count(cpu)++;
|
||||
kstat_this_cpu.irqs[NMIIRQ]++;
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
sum = irq_stat[cpu].__irq_count;
|
||||
|
||||
if (last_irq_sums[cpu] == sum) {
|
||||
|
|
|
@ -336,10 +336,11 @@
|
|||
#define NUM_PDC_RESULT 32
|
||||
|
||||
#if !defined(__ASSEMBLY__)
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
extern int pdc_type;
|
||||
|
||||
/* Values for pdc_type */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#ifndef _PARISC_SWAB_H
|
||||
#define _PARISC_SWAB_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#define __SWAB_64_THRU_32__
|
||||
|
|
|
@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
|
|||
if (CHECK_IRQ_PER_CPU(irq)) {
|
||||
/* Bad linux design decision. The mask has already
|
||||
* been set; we must reset it */
|
||||
irq_desc[irq].affinity = CPU_MASK_ALL;
|
||||
cpumask_setall(irq_desc[irq].affinity);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
|
|||
if (cpu_check_affinity(irq, dest))
|
||||
return;
|
||||
|
||||
irq_desc[irq].affinity = *dest;
|
||||
cpumask_copy(irq_desc[irq].affinity, dest);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
|
|||
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
|
||||
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
|
||||
#endif
|
||||
|
||||
return per_cpu(cpu_data, cpu).txn_addr;
|
||||
|
@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
|
|||
irq = eirr_to_irq(eirr_val);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
dest = irq_desc[irq].affinity;
|
||||
cpumask_copy(&dest, irq_desc[irq].affinity);
|
||||
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
|
||||
!cpu_isset(smp_processor_id(), dest)) {
|
||||
int cpu = first_cpu(dest);
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#ifndef __ASM_BOOTX_H__
|
||||
#define __ASM_BOOTX_H__
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef macintosh
|
||||
#include <Types.h>
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#include <asm/string.h>
|
||||
#endif
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/auxvec.h>
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
#ifndef __LINUX_KVM_POWERPC_H
|
||||
#define __LINUX_KVM_POWERPC_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct kvm_regs {
|
||||
__u64 pc;
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#ifndef _ASM_POWERPC_PS3FB_H_
|
||||
#define _ASM_POWERPC_PS3FB_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
/* ioctl */
|
||||
|
|
|
@ -23,9 +23,10 @@
|
|||
#ifndef _SPU_INFO_H
|
||||
#define _SPU_INFO_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <asm/spu.h>
|
||||
#include <linux/types.h>
|
||||
#else
|
||||
struct mfc_cq_sr {
|
||||
__u64 mfc_cq_data0_RW;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#ifdef __GNUC__
|
||||
|
|
|
@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
|
|||
if (irq_desc[irq].status & IRQ_PER_CPU)
|
||||
continue;
|
||||
|
||||
cpus_and(mask, irq_desc[irq].affinity, map);
|
||||
cpumask_and(&mask, irq_desc[irq].affinity, &map);
|
||||
if (any_online_cpu(mask) == NR_CPUS) {
|
||||
printk("Breaking affinity for irq %i\n", irq);
|
||||
mask = map;
|
||||
|
|
|
@ -184,6 +184,7 @@ SECTIONS
|
|||
. = ALIGN(PAGE_SIZE);
|
||||
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
|
||||
__per_cpu_start = .;
|
||||
*(.data.percpu.page_aligned)
|
||||
*(.data.percpu)
|
||||
*(.data.percpu.shared_aligned)
|
||||
__per_cpu_end = .;
|
||||
|
|
|
@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
|
|||
{
|
||||
int server;
|
||||
/* For the moment only implement delivery to all cpus or one cpu */
|
||||
cpumask_t cpumask = irq_desc[virq].affinity;
|
||||
cpumask_t cpumask;
|
||||
cpumask_t tmp = CPU_MASK_NONE;
|
||||
|
||||
cpumask_copy(&cpumask, irq_desc[virq].affinity);
|
||||
if (!distribute_irqs)
|
||||
return default_server;
|
||||
|
||||
|
@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
|
|||
virq, cpu);
|
||||
|
||||
/* Reset affinity to all cpus */
|
||||
irq_desc[virq].affinity = CPU_MASK_ALL;
|
||||
cpumask_setall(irq_desc[virq].affinity);
|
||||
desc->chip->set_affinity(virq, cpu_all_mask);
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
|
|
@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
|
|||
#ifdef CONFIG_SMP
|
||||
static int irq_choose_cpu(unsigned int virt_irq)
|
||||
{
|
||||
cpumask_t mask = irq_desc[virt_irq].affinity;
|
||||
cpumask_t mask;
|
||||
int cpuid;
|
||||
|
||||
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
|
||||
if (cpus_equal(mask, CPU_MASK_ALL)) {
|
||||
static int irq_rover;
|
||||
static DEFINE_SPINLOCK(irq_rover_lock);
|
||||
|
|
|
@ -252,9 +252,10 @@ struct irq_handler_data {
|
|||
#ifdef CONFIG_SMP
|
||||
static int irq_choose_cpu(unsigned int virt_irq)
|
||||
{
|
||||
cpumask_t mask = irq_desc[virt_irq].affinity;
|
||||
cpumask_t mask;
|
||||
int cpuid;
|
||||
|
||||
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
|
||||
if (cpus_equal(mask, CPU_MASK_ALL)) {
|
||||
static int irq_rover;
|
||||
static DEFINE_SPINLOCK(irq_rover_lock);
|
||||
|
@ -805,7 +806,7 @@ void fixup_irqs(void)
|
|||
!(irq_desc[irq].status & IRQ_PER_CPU)) {
|
||||
if (irq_desc[irq].chip->set_affinity)
|
||||
irq_desc[irq].chip->set_affinity(irq,
|
||||
&irq_desc[irq].affinity);
|
||||
irq_desc[irq].affinity);
|
||||
}
|
||||
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
|
||||
}
|
||||
|
|
|
@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
|
|||
|
||||
irq_enter();
|
||||
|
||||
kstat_this_cpu.irqs[0]++;
|
||||
kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
|
||||
|
||||
if (unlikely(!evt->event_handler)) {
|
||||
printk(KERN_WARNING
|
||||
|
|
arch/x86/Kconfig

File diff suppressed because it is too large.
@ -50,7 +50,7 @@ config M386
|
|||
config M486
|
||||
bool "486"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a 486 series processor, either Intel or one of the
|
||||
compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
|
||||
DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
|
||||
|
@ -59,7 +59,7 @@ config M486
|
|||
config M586
|
||||
bool "586/K5/5x86/6x86/6x86MX"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for an 586 or 686 series processor such as the AMD K5,
|
||||
the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
|
||||
assume the RDTSC (Read Time Stamp Counter) instruction.
|
||||
|
@ -67,21 +67,21 @@ config M586
|
|||
config M586TSC
|
||||
bool "Pentium-Classic"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Pentium Classic processor with the RDTSC (Read
|
||||
Time Stamp Counter) instruction for benchmarking.
|
||||
|
||||
config M586MMX
|
||||
bool "Pentium-MMX"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Pentium with the MMX graphics/multimedia
|
||||
extended instructions.
|
||||
|
||||
config M686
|
||||
bool "Pentium-Pro"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for Intel Pentium Pro chips. This enables the use of
|
||||
Pentium Pro extended instructions, and disables the init-time guard
|
||||
against the f00f bug found in earlier Pentiums.
|
||||
|
@ -89,7 +89,7 @@ config M686
|
|||
config MPENTIUMII
|
||||
bool "Pentium-II/Celeron(pre-Coppermine)"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for Intel chips based on the Pentium-II and
|
||||
pre-Coppermine Celeron core. This option enables an unaligned
|
||||
copy optimization, compiles the kernel with optimization flags
|
||||
|
@ -99,7 +99,7 @@ config MPENTIUMII
|
|||
config MPENTIUMIII
|
||||
bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for Intel chips based on the Pentium-III and
|
||||
Celeron-Coppermine core. This option enables use of some
|
||||
extended prefetch instructions in addition to the Pentium II
|
||||
|
@ -108,14 +108,14 @@ config MPENTIUMIII
|
|||
config MPENTIUMM
|
||||
bool "Pentium M"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for Intel Pentium M (not Pentium-4 M)
|
||||
notebook chips.
|
||||
|
||||
config MPENTIUM4
|
||||
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for Intel Pentium 4 chips. This includes the
|
||||
Pentium 4, Pentium D, P4-based Celeron and Xeon, and
|
||||
Pentium-4 M (not Pentium M) chips. This option enables compile
|
||||
|
@ -151,7 +151,7 @@ config MPENTIUM4
|
|||
config MK6
|
||||
bool "K6/K6-II/K6-III"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for an AMD K6-family processor. Enables use of
|
||||
some extended instructions, and passes appropriate optimization
|
||||
flags to GCC.
|
||||
|
@ -159,14 +159,14 @@ config MK6
|
|||
config MK7
|
||||
bool "Athlon/Duron/K7"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for an AMD Athlon K7-family processor. Enables use of
|
||||
some extended instructions, and passes appropriate optimization
|
||||
flags to GCC.
|
||||
|
||||
config MK8
|
||||
bool "Opteron/Athlon64/Hammer/K8"
|
||||
help
|
||||
---help---
|
||||
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
|
||||
Enables use of some extended instructions, and passes appropriate
|
||||
optimization flags to GCC.
|
||||
|
@ -174,7 +174,7 @@ config MK8
|
|||
config MCRUSOE
|
||||
bool "Crusoe"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Transmeta Crusoe processor. Treats the processor
|
||||
like a 586 with TSC, and sets some GCC optimization flags (like a
|
||||
Pentium Pro with no alignment requirements).
|
||||
|
@ -182,13 +182,13 @@ config MCRUSOE
|
|||
config MEFFICEON
|
||||
bool "Efficeon"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Transmeta Efficeon processor.
|
||||
|
||||
config MWINCHIPC6
|
||||
bool "Winchip-C6"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for an IDT Winchip C6 chip. Linux and GCC
|
||||
treat this chip as a 586TSC with some extended instructions
|
||||
and alignment requirements.
|
||||
|
@ -196,7 +196,7 @@ config MWINCHIPC6
|
|||
config MWINCHIP3D
|
||||
bool "Winchip-2/Winchip-2A/Winchip-3"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for an IDT Winchip-2, 2A or 3. Linux and GCC
|
||||
treat this chip as a 586TSC with some extended instructions
|
||||
and alignment requirements. Also enable out of order memory
|
||||
|
@ -206,19 +206,19 @@ config MWINCHIP3D
|
|||
config MGEODEGX1
|
||||
bool "GeodeGX1"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Geode GX1 (Cyrix MediaGX) chip.
|
||||
|
||||
config MGEODE_LX
|
||||
bool "Geode GX/LX"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for AMD Geode GX and LX processors.
|
||||
|
||||
config MCYRIXIII
|
||||
bool "CyrixIII/VIA-C3"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a Cyrix III or C3 chip. Presently Linux and GCC
|
||||
treat this chip as a generic 586. Whilst the CPU is 686 class,
|
||||
it lacks the cmov extension which gcc assumes is present when
|
||||
|
@ -230,7 +230,7 @@ config MCYRIXIII
|
|||
config MVIAC3_2
|
||||
bool "VIA C3-2 (Nehemiah)"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a VIA C3 "Nehemiah". Selecting this enables usage
|
||||
of SSE and tells gcc to treat the CPU as a 686.
|
||||
Note, this kernel will not boot on older (pre model 9) C3s.
|
||||
|
@ -238,14 +238,14 @@ config MVIAC3_2
|
|||
config MVIAC7
|
||||
bool "VIA C7"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Select this for a VIA C7. Selecting this uses the correct cache
|
||||
shift and tells gcc to treat the CPU as a 686.
|
||||
|
||||
config MPSC
|
||||
bool "Intel P4 / older Netburst based Xeon"
|
||||
depends on X86_64
|
||||
help
|
||||
---help---
|
||||
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
|
||||
Xeon CPUs with Intel 64bit which is compatible with x86-64.
|
||||
Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
|
||||
|
@ -255,7 +255,7 @@ config MPSC
|
|||
|
||||
config MCORE2
|
||||
bool "Core 2/newer Xeon"
|
||||
help
|
||||
---help---
|
||||
|
||||
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
|
||||
53xx) CPUs. You can distinguish newer from older Xeons by the CPU
|
||||
|
@ -265,7 +265,7 @@ config MCORE2
|
|||
config GENERIC_CPU
|
||||
bool "Generic-x86-64"
|
||||
depends on X86_64
|
||||
help
|
||||
---help---
|
||||
Generic x86-64 CPU.
|
||||
Run equally well on all x86-64 CPUs.
|
||||
|
||||
|
@ -274,7 +274,7 @@ endchoice
|
|||
config X86_GENERIC
|
||||
bool "Generic x86 support"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
Instead of just including optimizations for the selected
|
||||
x86 variant (e.g. PII, Crusoe or Athlon), include some more
|
||||
generic optimizations as well. This will make the kernel
|
||||
|
@ -294,25 +294,23 @@ config X86_CPU
|
|||
# Define implied options from the CPU selection here
|
||||
config X86_L1_CACHE_BYTES
|
||||
int
|
||||
default "128" if GENERIC_CPU || MPSC
|
||||
default "64" if MK8 || MCORE2
|
||||
depends on X86_64
|
||||
default "128" if MPSC
|
||||
default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
|
||||
|
||||
config X86_INTERNODE_CACHE_BYTES
|
||||
int
|
||||
default "4096" if X86_VSMP
|
||||
default X86_L1_CACHE_BYTES if !X86_VSMP
|
||||
depends on X86_64
|
||||
|
||||
config X86_CMPXCHG
|
||||
def_bool X86_64 || (X86_32 && !M386)
|
||||
|
||||
config X86_L1_CACHE_SHIFT
|
||||
int
|
||||
default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
|
||||
default "7" if MPENTIUM4 || MPSC
|
||||
default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
|
||||
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
|
||||
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
|
||||
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
|
||||
config X86_XADD
|
||||
def_bool y
|
||||
|
@ -321,7 +319,7 @@ config X86_XADD
|
|||
config X86_PPRO_FENCE
|
||||
bool "PentiumPro memory ordering errata workaround"
|
||||
depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
|
||||
help
|
||||
---help---
|
||||
Old PentiumPro multiprocessor systems had errata that could cause
|
||||
memory operations to violate the x86 ordering standard in rare cases.
|
||||
Enabling this option will attempt to work around some (but not all)
|
||||
|
@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR
|
|||
|
||||
menuconfig PROCESSOR_SELECT
|
||||
bool "Supported processor vendors" if EMBEDDED
|
||||
help
|
||||
---help---
|
||||
This lets you choose what x86 vendor support code your kernel
|
||||
will include.
|
||||
|
||||
config CPU_SUP_INTEL
|
||||
default y
|
||||
bool "Support Intel processors" if PROCESSOR_SELECT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for Intel processors
|
||||
|
||||
You need this enabled if you want your kernel to run on an
|
||||
|
@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
|
|||
default y
|
||||
bool "Support Cyrix processors" if PROCESSOR_SELECT
|
||||
depends on !64BIT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for Cyrix processors
|
||||
|
||||
You need this enabled if you want your kernel to run on a
|
||||
|
@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
|
|||
config CPU_SUP_AMD
|
||||
default y
|
||||
bool "Support AMD processors" if PROCESSOR_SELECT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for AMD processors
|
||||
|
||||
You need this enabled if you want your kernel to run on an
|
||||
|
@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
|
|||
default y
|
||||
bool "Support Centaur processors" if PROCESSOR_SELECT
|
||||
depends on !64BIT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for Centaur processors
|
||||
|
||||
You need this enabled if you want your kernel to run on a
|
||||
|
@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
|
|||
default y
|
||||
bool "Support Centaur processors" if PROCESSOR_SELECT
|
||||
depends on 64BIT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for Centaur processors
|
||||
|
||||
You need this enabled if you want your kernel to run on a
|
||||
|
@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
|
|||
default y
|
||||
bool "Support Transmeta processors" if PROCESSOR_SELECT
|
||||
depends on !64BIT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for Transmeta processors
|
||||
|
||||
You need this enabled if you want your kernel to run on a
|
||||
|
@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
|
|||
default y
|
||||
bool "Support UMC processors" if PROCESSOR_SELECT
|
||||
depends on !64BIT
|
||||
help
|
||||
---help---
|
||||
This enables detection, tunings and quirks for UMC processors
|
||||
|
||||
You need this enabled if you want your kernel to run on a
|
||||
|
@ -523,7 +521,7 @@ config X86_PTRACE_BTS
|
|||
bool "Branch Trace Store"
|
||||
default y
|
||||
depends on X86_DEBUGCTLMSR
|
||||
help
|
||||
---help---
|
||||
This adds a ptrace interface to the hardware's branch trace store.
|
||||
|
||||
Debuggers may use it to collect an execution trace of the debugged
|
||||
|
|
|
@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
|
|||
|
||||
config STRICT_DEVMEM
|
||||
bool "Filter access to /dev/mem"
|
||||
help
|
||||
---help---
|
||||
If this option is disabled, you allow userspace (root) access to all
|
||||
of memory, including kernel and userspace memory. Accidental
|
||||
access to this is obviously disastrous, but specific access can
|
||||
|
@ -25,7 +25,7 @@ config STRICT_DEVMEM
|
|||
config X86_VERBOSE_BOOTUP
|
||||
bool "Enable verbose x86 bootup info messages"
|
||||
default y
|
||||
help
|
||||
---help---
|
||||
Enables the informational output from the decompression stage
|
||||
(e.g. bzImage) of the boot. If you disable this you will still
|
||||
see errors. Disable this if you want silent bootup.
|
||||
|
@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
|
|||
config EARLY_PRINTK
|
||||
bool "Early printk" if EMBEDDED
|
||||
default y
|
||||
help
|
||||
---help---
|
||||
Write kernel log output directly into the VGA buffer or to a serial
|
||||
port.
|
||||
|
||||
|
@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
|
|||
bool "Early printk via EHCI debug port"
|
||||
default n
|
||||
depends on EARLY_PRINTK && PCI
|
||||
help
|
||||
---help---
|
||||
Write kernel log output directly into the EHCI debug port.
|
||||
|
||||
This is useful for kernel debugging when your machine crashes very
|
||||
|
@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
|
|||
config DEBUG_STACKOVERFLOW
|
||||
bool "Check for stack overflows"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
---help---
|
||||
This option will cause messages to be printed if free stack space
|
||||
drops below a certain limit.
|
||||
|
||||
config DEBUG_STACK_USAGE
|
||||
bool "Stack utilization instrumentation"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
---help---
|
||||
Enables the display of the minimum amount of free stack which each
|
||||
task has ever had available in the sysrq-T and sysrq-P debug output.
|
||||
|
||||
|
@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
|
|||
config DEBUG_PAGEALLOC
|
||||
bool "Debug page memory allocations"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
---help---
|
||||
Unmap pages from the kernel linear mapping after free_pages().
|
||||
This results in a large slowdown, but helps to find certain types
|
||||
of memory corruptions.
|
||||
|
@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
|
|||
config DEBUG_PER_CPU_MAPS
|
||||
bool "Debug access to per_cpu maps"
|
||||
depends on DEBUG_KERNEL
|
||||
depends on X86_SMP
|
||||
depends on SMP
|
||||
default n
|
||||
help
|
||||
---help---
|
||||
Say Y to verify that the per_cpu map being accessed has
|
||||
been setup. Adds a fair amount of code to kernel memory
|
||||
and decreases performance.
|
||||
|
@ -96,7 +96,7 @@ config X86_PTDUMP
|
|||
bool "Export kernel pagetable layout to userspace via debugfs"
|
||||
depends on DEBUG_KERNEL
|
||||
select DEBUG_FS
|
||||
help
|
||||
---help---
|
||||
Say Y here if you want to show the kernel pagetable layout in a
|
||||
debugfs file. This information is only useful for kernel developers
|
||||
who are working in architecture specific areas of the kernel.
|
||||
|
@ -108,7 +108,7 @@ config DEBUG_RODATA
|
|||
bool "Write protect kernel read-only data structures"
|
||||
default y
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
---help---
|
||||
Mark the kernel read-only data as write-protected in the pagetables,
|
||||
in order to catch accidental (and incorrect) writes to such const
|
||||
data. This is recommended so that we can catch kernel bugs sooner.
|
||||
|
@ -117,7 +117,8 @@ config DEBUG_RODATA
|
|||
config DEBUG_RODATA_TEST
|
||||
bool "Testcase for the DEBUG_RODATA feature"
|
||||
depends on DEBUG_RODATA
|
||||
help
|
||||
default y
|
||||
---help---
|
||||
This option enables a testcase for the DEBUG_RODATA
|
||||
feature as well as for the change_page_attr() infrastructure.
|
||||
If in doubt, say "N"
|
||||
|
@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
|
|||
config DEBUG_NX_TEST
|
||||
tristate "Testcase for the NX non-executable stack feature"
|
||||
depends on DEBUG_KERNEL && m
|
||||
help
|
||||
---help---
|
||||
This option enables a testcase for the CPU NX capability
|
||||
and the software setup of this feature.
|
||||
If in doubt, say "N"
|
||||
|
@ -133,7 +134,7 @@ config DEBUG_NX_TEST
|
|||
config 4KSTACKS
|
||||
bool "Use 4Kb for kernel stacks instead of 8Kb"
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
If you say Y here the kernel will use a 4Kb stacksize for the
|
||||
kernel stack attached to each process/thread. This facilitates
|
||||
running more threads on a system and also reduces the pressure
|
||||
|
@ -144,7 +145,7 @@ config DOUBLEFAULT
|
|||
default y
|
||||
bool "Enable doublefault exception handler" if EMBEDDED
|
||||
depends on X86_32
|
||||
help
|
||||
---help---
|
||||
This option allows trapping of rare doublefault exceptions that
|
||||
would otherwise cause a system to silently reboot. Disabling this
|
||||
option saves about 4k and might cause you much additional grey
|
||||
|
@ -154,7 +155,7 @@ config IOMMU_DEBUG
|
|||
bool "Enable IOMMU debugging"
|
||||
depends on GART_IOMMU && DEBUG_KERNEL
|
||||
depends on X86_64
|
||||
help
|
||||
---help---
|
||||
Force the IOMMU to on even when you have less than 4GB of
|
||||
memory and add debugging code. On overflow always panic. And
|
||||
allow to enable IOMMU leak tracing. Can be disabled at boot
|
||||
|
@ -170,7 +171,7 @@ config IOMMU_LEAK
|
|||
bool "IOMMU leak tracing"
|
||||
depends on DEBUG_KERNEL
|
||||
depends on IOMMU_DEBUG
|
||||
help
|
||||
---help---
|
||||
Add a simple leak tracer to the IOMMU code. This is useful when you
|
||||
are debugging a buggy device driver that leaks IOMMU mappings.
|
||||
|
||||
|
@ -203,25 +204,25 @@ choice
|
|||
|
||||
config IO_DELAY_0X80
|
||||
bool "port 0x80 based port-IO delay [recommended]"
|
||||
help
|
||||
---help---
|
||||
This is the traditional Linux IO delay used for in/out_p.
|
||||
It is the most tested hence safest selection here.
|
||||
|
||||
config IO_DELAY_0XED
|
||||
bool "port 0xed based port-IO delay"
|
||||
help
|
||||
---help---
|
||||
Use port 0xed as the IO delay. This frees up port 0x80 which is
|
||||
often used as a hardware-debug port.
|
||||
|
||||
config IO_DELAY_UDELAY
|
||||
bool "udelay based port-IO delay"
|
||||
help
|
||||
---help---
|
||||
Use udelay(2) as the IO delay method. This provides the delay
|
||||
while not having any side-effect on the IO port space.
|
||||
|
||||
config IO_DELAY_NONE
|
||||
bool "no port-IO delay"
|
||||
help
|
||||
---help---
|
||||
No port-IO delay. Will break on old boxes that require port-IO
|
||||
delay for certain operations. Should work on most new machines.
|
||||
|
||||
|
@ -255,18 +256,18 @@ config DEBUG_BOOT_PARAMS
|
|||
bool "Debug boot parameters"
|
||||
depends on DEBUG_KERNEL
|
||||
depends on DEBUG_FS
|
||||
help
|
||||
---help---
|
||||
This option will cause struct boot_params to be exported via debugfs.
|
||||
|
||||
config CPA_DEBUG
|
||||
bool "CPA self-test code"
|
||||
depends on DEBUG_KERNEL
|
||||
help
|
||||
---help---
|
||||
Do change_page_attr() self-tests every 30 seconds.
|
||||
|
||||
config OPTIMIZE_INLINING
|
||||
bool "Allow gcc to uninline functions marked 'inline'"
|
||||
help
|
||||
---help---
|
||||
This option determines if the kernel forces gcc to inline the functions
|
||||
developers have marked 'inline'. Doing so takes away freedom from gcc to
|
||||
do what it thinks is best, which is desirable for the gcc 3.x series of
|
||||
|
@ -279,4 +280,3 @@ config OPTIMIZE_INLINING
|
|||
If unsure, say N.
|
||||
|
||||
endmenu
|
||||
|
||||
|
|
|
@ -70,14 +70,17 @@ else
|
|||
# this works around some issues with generating unwind tables in older gccs
|
||||
# newer gccs do it by default
|
||||
KBUILD_CFLAGS += -maccumulate-outgoing-args
|
||||
endif
|
||||
|
||||
stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
|
||||
stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
|
||||
"$(CC)" -fstack-protector )
|
||||
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
|
||||
"$(CC)" -fstack-protector-all )
|
||||
|
||||
KBUILD_CFLAGS += $(stackp-y)
|
||||
ifdef CONFIG_CC_STACKPROTECTOR
|
||||
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
|
||||
ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
|
||||
stackp-y := -fstack-protector
|
||||
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
|
||||
KBUILD_CFLAGS += $(stackp-y)
|
||||
else
|
||||
$(warning stack protector enabled but no compiler support)
|
||||
endif
|
||||
endif
|
||||
|
||||
# Stackpointer is addressed different for 32 bit and 64 bit x86
|
||||
|
@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
|
|||
# prevent gcc from generating any FP code by mistake
|
||||
KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
|
||||
|
||||
###
|
||||
# Sub architecture support
|
||||
# fcore-y is linked before mcore-y files.
|
||||
|
||||
# Default subarch .c files
|
||||
mcore-y := arch/x86/mach-default/
|
||||
|
||||
# Voyager subarch support
|
||||
mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager
|
||||
mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/
|
||||
|
||||
# generic subarchitecture
|
||||
mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
|
||||
fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
|
||||
mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/
|
||||
|
||||
# default subarch .h files
|
||||
mflags-y += -Iarch/x86/include/asm/mach-default
|
||||
|
||||
# 64 bit does not support subarch support - clear sub arch variables
|
||||
fcore-$(CONFIG_X86_64) :=
|
||||
mcore-$(CONFIG_X86_64) :=
|
||||
|
||||
KBUILD_CFLAGS += $(mflags-y)
|
||||
KBUILD_AFLAGS += $(mflags-y)
|
||||
|
||||
|
@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
|
|||
core-y += arch/x86/kernel/
|
||||
core-y += arch/x86/mm/
|
||||
|
||||
# Remaining sub architecture files
|
||||
core-y += $(mcore-y)
|
||||
|
||||
core-y += arch/x86/crypto/
|
||||
core-y += arch/x86/vdso/
|
||||
core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
|
||||
|
|
|
@@ -6,33 +6,29 @@
# for more details.
#
# Copyright (C) 1994 by Linus Torvalds
# Changed by many, many contributors over the years.
#

# ROOT_DEV specifies the default root-device when making the image.
# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
# the default of FLOPPY is used by 'build'.

ROOT_DEV := CURRENT
ROOT_DEV := CURRENT

# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
# The number is the same as you would ordinarily press at bootup.

SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA

# If you want the RAM disk device, define this to be the size in blocks.

#RAMDISK := -DRAMDISK=512

targets := vmlinux.bin setup.bin setup.elf zImage bzImage
targets := vmlinux.bin setup.bin setup.elf bzImage
subdir- := compressed

setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
setup-y += header.o main.o mca.o memory.o pm.o pmjump.o
setup-y += printf.o string.o tty.o video.o video-mode.o version.o
setup-$(CONFIG_X86_APM_BOOT) += apm.o
setup-$(CONFIG_X86_VOYAGER) += voyager.o

# The link order of the video-*.o modules can matter. In particular,
# video-vga.o *must* be listed first, followed by video-vesa.o.

@@ -72,17 +68,13 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
KBUILD_CFLAGS += $(call cc-option,-m32)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__

$(obj)/zImage: asflags-y := $(SVGA_MODE) $(RAMDISK)
$(obj)/bzImage: ccflags-y := -D__BIG_KERNEL__
$(obj)/bzImage: asflags-y := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
$(obj)/bzImage: BUILDFLAGS := -b
$(obj)/bzImage: asflags-y := $(SVGA_MODE)

quiet_cmd_image = BUILD $@
cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/setup.bin \
$(obj)/vmlinux.bin $(ROOT_DEV) > $@
cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
$(ROOT_DEV) > $@

$(obj)/zImage $(obj)/bzImage: $(obj)/setup.bin \
$(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
|
||||
|
||||
|
|
|
@@ -2,6 +2,7 @@
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007-2008 rPath, Inc. - All Rights Reserved
* Copyright 2009 Intel Corporation
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.

@@ -15,16 +16,23 @@
#include "boot.h"

#define MAX_8042_LOOPS 100000
#define MAX_8042_FF 32

static int empty_8042(void)
{
u8 status;
int loops = MAX_8042_LOOPS;
int ffs = MAX_8042_FF;

while (loops--) {
io_delay();

status = inb(0x64);
if (status == 0xff) {
/* FF is a plausible, but very unlikely status */
if (!--ffs)
return -1; /* Assume no KBC present */
}
if (status & 1) {
/* Read and discard input data */
io_delay();

@@ -118,44 +126,37 @@ static void enable_a20_fast(void)

int enable_a20(void)
{
#if defined(CONFIG_X86_ELAN)
/* Elan croaks if we try to touch the KBC */
enable_a20_fast();
while (!a20_test_long())
;
return 0;
#elif defined(CONFIG_X86_VOYAGER)
/* On Voyager, a20_test() is unsafe? */
enable_a20_kbc();
return 0;
#else
int loops = A20_ENABLE_LOOPS;
while (loops--) {
/* First, check to see if A20 is already enabled
(legacy free, etc.) */
if (a20_test_short())
return 0;
int kbc_err;

/* Next, try the BIOS (INT 0x15, AX=0x2401) */
enable_a20_bios();
if (a20_test_short())
return 0;
while (loops--) {
/* First, check to see if A20 is already enabled
(legacy free, etc.) */
if (a20_test_short())
return 0;

/* Next, try the BIOS (INT 0x15, AX=0x2401) */
enable_a20_bios();
if (a20_test_short())
return 0;

/* Try enabling A20 through the keyboard controller */
kbc_err = empty_8042();

/* Try enabling A20 through the keyboard controller */
empty_8042();
if (a20_test_short())
return 0; /* BIOS worked, but with delayed reaction */

enable_a20_kbc();
if (a20_test_long())
return 0;

/* Finally, try enabling the "fast A20 gate" */
enable_a20_fast();
if (a20_test_long())
return 0;
}

return -1;
#endif
if (a20_test_short())
return 0; /* BIOS worked, but with delayed reaction */

if (!kbc_err) {
enable_a20_kbc();
if (a20_test_long())
return 0;
}

/* Finally, try enabling the "fast A20 gate" */
enable_a20_fast();
if (a20_test_long())
return 0;
}

return -1;
}
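The retry loop above depends on a20_test_short()/a20_test_long() to decide whether the gate actually opened; those helpers live elsewhere in a20.c and work through the FS/GS segment registers in real mode. The sketch below only illustrates the underlying idea with flat pointers, so the helper name and pointer types are assumptions, not kernel code.

/*
 * Idea behind the A20 tests used above (illustrative sketch only): write
 * a changing value at a low address and look at the address exactly
 * 1 MiB higher.  With A20 masked, the higher address wraps around and
 * aliases the lower one, so the two reads stay identical.
 */
static int a20_gate_is_open(volatile unsigned int *low,
			    volatile unsigned int *one_mib_up)
{
	unsigned int saved = *low;
	int open;

	*low = saved + 1;		/* disturb the low copy */
	open = (*one_mib_up != *low);	/* no aliasing => gate is open */
	*low = saved;			/* restore the original contents */
	return open;
}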
|
||||
|
|
|
@@ -302,9 +302,6 @@ void probe_cards(int unsafe);
/* video-vesa.c */
void vesa_store_edid(void);

/* voyager.c */
int query_voyager(void);

#endif /* __ASSEMBLY__ */

#endif /* BOOT_BOOT_H */
|
||||
|
|
|
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#

targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o

KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC

@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
ifdef CONFIG_RELOCATABLE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
$(call if_changed,lzma)
else
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
endif
LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T

else

$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)

LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
endif

$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
suffix_$(CONFIG_KERNEL_GZIP) = gz
suffix_$(CONFIG_KERNEL_BZIP2) = bz2
suffix_$(CONFIG_KERNEL_LZMA) = lzma

$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
$(call if_changed,ld)
|
||||
|
|
|
@ -25,14 +25,12 @@
|
|||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/page_types.h>
|
||||
#include <asm/boot.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
.section ".text.head","ax",@progbits
|
||||
.globl startup_32
|
||||
|
||||
startup_32:
|
||||
ENTRY(startup_32)
|
||||
cld
|
||||
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
|
||||
* us to not reload segments */
|
||||
|
@ -113,6 +111,8 @@ startup_32:
|
|||
*/
|
||||
leal relocated(%ebx), %eax
|
||||
jmp *%eax
|
||||
ENDPROC(startup_32)
|
||||
|
||||
.section ".text"
|
||||
relocated:
|
||||
|
||||
|
|
|
@ -26,8 +26,8 @@
|
|||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable_types.h>
|
||||
#include <asm/page_types.h>
|
||||
#include <asm/boot.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/processor-flags.h>
|
||||
|
@ -35,9 +35,7 @@
|
|||
|
||||
.section ".text.head"
|
||||
.code32
|
||||
.globl startup_32
|
||||
|
||||
startup_32:
|
||||
ENTRY(startup_32)
|
||||
cld
|
||||
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
|
||||
* us to not reload segments */
|
||||
|
@ -176,6 +174,7 @@ startup_32:
|
|||
|
||||
/* Jump from 32bit compatibility mode into 64bit mode. */
|
||||
lret
|
||||
ENDPROC(startup_32)
|
||||
|
||||
no_longmode:
|
||||
/* This isn't an x86-64 CPU so hang */
|
||||
|
@ -295,7 +294,6 @@ relocated:
|
|||
call decompress_kernel
|
||||
popq %rsi
|
||||
|
||||
|
||||
/*
|
||||
* Jump to the decompressed kernel.
|
||||
*/
|
||||
|
|
|
@ -116,71 +116,13 @@
|
|||
/*
|
||||
* gzip declarations
|
||||
*/
|
||||
|
||||
#define OF(args) args
|
||||
#define STATIC static
|
||||
|
||||
#undef memset
|
||||
#undef memcpy
|
||||
#define memzero(s, n) memset((s), 0, (n))
|
||||
|
||||
typedef unsigned char uch;
|
||||
typedef unsigned short ush;
|
||||
typedef unsigned long ulg;
|
||||
|
||||
/*
|
||||
* Window size must be at least 32k, and a power of two.
|
||||
* We don't actually have a window just a huge output buffer,
|
||||
* so we report a 2G window size, as that should always be
|
||||
* larger than our output buffer:
|
||||
*/
|
||||
#define WSIZE 0x80000000
|
||||
|
||||
/* Input buffer: */
|
||||
static unsigned char *inbuf;
|
||||
|
||||
/* Sliding window buffer (and final output buffer): */
|
||||
static unsigned char *window;
|
||||
|
||||
/* Valid bytes in inbuf: */
|
||||
static unsigned insize;
|
||||
|
||||
/* Index of next byte to be processed in inbuf: */
|
||||
static unsigned inptr;
|
||||
|
||||
/* Bytes in output buffer: */
|
||||
static unsigned outcnt;
|
||||
|
||||
/* gzip flag byte */
|
||||
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
|
||||
#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */
|
||||
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
|
||||
#define ORIG_NAM 0x08 /* bit 3 set: original file name present */
|
||||
#define COMMENT 0x10 /* bit 4 set: file comment present */
|
||||
#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
|
||||
#define RESERVED 0xC0 /* bit 6, 7: reserved */
|
||||
|
||||
#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
|
||||
|
||||
/* Diagnostic functions */
|
||||
#ifdef DEBUG
|
||||
# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
|
||||
# define Trace(x) do { fprintf x; } while (0)
|
||||
# define Tracev(x) do { if (verbose) fprintf x ; } while (0)
|
||||
# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0)
|
||||
# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
|
||||
# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0)
|
||||
#else
|
||||
# define Assert(cond, msg)
|
||||
# define Trace(x)
|
||||
# define Tracev(x)
|
||||
# define Tracevv(x)
|
||||
# define Tracec(c, x)
|
||||
# define Tracecv(c, x)
|
||||
#endif
|
||||
|
||||
static int fill_inbuf(void);
|
||||
static void flush_window(void);
|
||||
static void error(char *m);
|
||||
|
||||
/*
|
||||
|
@ -189,13 +131,8 @@ static void error(char *m);
|
|||
static struct boot_params *real_mode; /* Pointer to real-mode data */
|
||||
static int quiet;
|
||||
|
||||
extern unsigned char input_data[];
|
||||
extern int input_len;
|
||||
|
||||
static long bytes_out;
|
||||
|
||||
static void *memset(void *s, int c, unsigned n);
|
||||
static void *memcpy(void *dest, const void *src, unsigned n);
|
||||
void *memcpy(void *dest, const void *src, unsigned n);
|
||||
|
||||
static void __putstr(int, const char *);
|
||||
#define putstr(__x) __putstr(0, __x)
|
||||
|
@@ -213,7 +150,17 @@ static char *vidmem
static int vidport;
static int lines, cols;

#include "../../../../lib/inflate.c"
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

static void scroll(void)
{
|
||||
|
@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
|
|||
return s;
|
||||
}
|
||||
|
||||
static void *memcpy(void *dest, const void *src, unsigned n)
|
||||
void *memcpy(void *dest, const void *src, unsigned n)
|
||||
{
|
||||
int i;
|
||||
const char *s = src;
|
||||
|
@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
|
|||
return dest;
|
||||
}
|
||||
|
||||
/* ===========================================================================
|
||||
* Fill the input buffer. This is called only when the buffer is empty
|
||||
* and at least one byte is really needed.
|
||||
*/
|
||||
static int fill_inbuf(void)
|
||||
{
|
||||
error("ran out of input data");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ===========================================================================
|
||||
* Write the output window window[0..outcnt-1] and update crc and bytes_out.
|
||||
* (Used for the decompressed data only.)
|
||||
*/
|
||||
static void flush_window(void)
|
||||
{
|
||||
/* With my window equal to my output buffer
|
||||
* I only need to compute the crc here.
|
||||
*/
|
||||
unsigned long c = crc; /* temporary variable */
|
||||
unsigned n;
|
||||
unsigned char *in, ch;
|
||||
|
||||
in = window;
|
||||
for (n = 0; n < outcnt; n++) {
|
||||
ch = *in++;
|
||||
c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
|
||||
}
|
||||
crc = c;
|
||||
bytes_out += (unsigned long)outcnt;
|
||||
outcnt = 0;
|
||||
}
|
||||
|
||||
static void error(char *x)
|
||||
{
|
||||
|
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
lines = real_mode->screen_info.orig_video_lines;
cols = real_mode->screen_info.orig_video_cols;

window = output; /* Output buffer (Normally at 1M) */
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
inbuf = input_data; /* Input buffer */
insize = input_len;
inptr = 0;

#ifdef CONFIG_X86_64
if ((unsigned long)output & (__KERNEL_ALIGN - 1))

@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
#endif
#endif

makecrc();
if (!quiet)
putstr("\nDecompressing Linux... ");
gunzip();
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
if (!quiet)
putstr("done.\nBooting the kernel.\n");
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
*
|
||||
* ----------------------------------------------------------------------- */
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/*
|
||||
* Memory copy routines
|
||||
*/
|
||||
|
@ -15,9 +17,7 @@
|
|||
.code16gcc
|
||||
.text
|
||||
|
||||
.globl memcpy
|
||||
.type memcpy, @function
|
||||
memcpy:
|
||||
GLOBAL(memcpy)
|
||||
pushw %si
|
||||
pushw %di
|
||||
movw %ax, %di
|
||||
|
@ -31,11 +31,9 @@ memcpy:
|
|||
popw %di
|
||||
popw %si
|
||||
ret
|
||||
.size memcpy, .-memcpy
|
||||
ENDPROC(memcpy)
|
||||
|
||||
.globl memset
|
||||
.type memset, @function
|
||||
memset:
|
||||
GLOBAL(memset)
|
||||
pushw %di
|
||||
movw %ax, %di
|
||||
movzbl %dl, %eax
|
||||
|
@ -48,52 +46,42 @@ memset:
|
|||
rep; stosb
|
||||
popw %di
|
||||
ret
|
||||
.size memset, .-memset
|
||||
ENDPROC(memset)
|
||||
|
||||
.globl copy_from_fs
|
||||
.type copy_from_fs, @function
|
||||
copy_from_fs:
|
||||
GLOBAL(copy_from_fs)
|
||||
pushw %ds
|
||||
pushw %fs
|
||||
popw %ds
|
||||
call memcpy
|
||||
popw %ds
|
||||
ret
|
||||
.size copy_from_fs, .-copy_from_fs
|
||||
ENDPROC(copy_from_fs)
|
||||
|
||||
.globl copy_to_fs
|
||||
.type copy_to_fs, @function
|
||||
copy_to_fs:
|
||||
GLOBAL(copy_to_fs)
|
||||
pushw %es
|
||||
pushw %fs
|
||||
popw %es
|
||||
call memcpy
|
||||
popw %es
|
||||
ret
|
||||
.size copy_to_fs, .-copy_to_fs
|
||||
ENDPROC(copy_to_fs)
|
||||
|
||||
#if 0 /* Not currently used, but can be enabled as needed */
|
||||
|
||||
.globl copy_from_gs
|
||||
.type copy_from_gs, @function
|
||||
copy_from_gs:
|
||||
GLOBAL(copy_from_gs)
|
||||
pushw %ds
|
||||
pushw %gs
|
||||
popw %ds
|
||||
call memcpy
|
||||
popw %ds
|
||||
ret
|
||||
.size copy_from_gs, .-copy_from_gs
|
||||
.globl copy_to_gs
|
||||
ENDPROC(copy_from_gs)
|
||||
|
||||
.type copy_to_gs, @function
|
||||
copy_to_gs:
|
||||
GLOBAL(copy_to_gs)
|
||||
pushw %es
|
||||
pushw %gs
|
||||
popw %es
|
||||
call memcpy
|
||||
popw %es
|
||||
ret
|
||||
.size copy_to_gs, .-copy_to_gs
|
||||
|
||||
ENDPROC(copy_to_gs)
|
||||
#endif
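The copy helpers above drop their open-coded .globl/.type/.size directives in favour of the GLOBAL()/ENDPROC() helpers pulled in through <linux/linkage.h>. Roughly, and only as a simplified sketch rather than the kernel's exact definitions, the pair expands to the same kind of directives:

/* Simplified sketch of the linkage helpers used above (assumed shape,
 * not the verbatim kernel macros): */
#define GLOBAL(name)			\
	.globl name;			\
	name:

#define ENDPROC(name)			\
	.type name, @function;		\
	.size name, .-name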
|
||||
|
|
|
@ -19,17 +19,13 @@
|
|||
#include <linux/utsrelease.h>
|
||||
#include <asm/boot.h>
|
||||
#include <asm/e820.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/page_types.h>
|
||||
#include <asm/setup.h>
|
||||
#include "boot.h"
|
||||
#include "offsets.h"
|
||||
|
||||
SETUPSECTS = 4 /* default nr of setup-sectors */
|
||||
BOOTSEG = 0x07C0 /* original address of boot-sector */
|
||||
SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */
|
||||
SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */
|
||||
/* to be loaded */
|
||||
ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */
|
||||
SYSSEG = 0x1000 /* historical load address >> 4 */
|
||||
|
||||
#ifndef SVGA_MODE
|
||||
#define SVGA_MODE ASK_VGA
|
||||
|
@ -97,12 +93,12 @@ bugger_off_msg:
|
|||
.section ".header", "a"
|
||||
.globl hdr
|
||||
hdr:
|
||||
setup_sects: .byte SETUPSECTS
|
||||
setup_sects: .byte 0 /* Filled in by build.c */
|
||||
root_flags: .word ROOT_RDONLY
|
||||
syssize: .long SYSSIZE
|
||||
ram_size: .word RAMDISK
|
||||
syssize: .long 0 /* Filled in by build.c */
|
||||
ram_size: .word 0 /* Obsolete */
|
||||
vid_mode: .word SVGA_MODE
|
||||
root_dev: .word ROOT_DEV
|
||||
root_dev: .word 0 /* Filled in by build.c */
|
||||
boot_flag: .word 0xAA55
|
||||
|
||||
# offset 512, entry point
|
||||
|
@ -123,14 +119,15 @@ _start:
|
|||
# or else old loadlin-1.5 will fail)
|
||||
.globl realmode_swtch
|
||||
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
|
||||
start_sys_seg: .word SYSSEG
|
||||
start_sys_seg: .word SYSSEG # obsolete and meaningless, but just
|
||||
# in case something decided to "use" it
|
||||
.word kernel_version-512 # pointing to kernel version string
|
||||
# above section of header is compatible
|
||||
# with loadlin-1.5 (header v1.5). Don't
|
||||
# change it.
|
||||
|
||||
type_of_loader: .byte 0 # = 0, old one (LILO, Loadlin,
|
||||
# Bootlin, SYSLX, bootsect...)
|
||||
type_of_loader: .byte 0 # 0 means ancient bootloader, newer
|
||||
# bootloaders know to change this.
|
||||
# See Documentation/i386/boot.txt for
|
||||
# assigned ids
|
||||
|
||||
|
@ -142,11 +139,7 @@ CAN_USE_HEAP = 0x80 # If set, the loader also has set
|
|||
# space behind setup.S can be used for
|
||||
# heap purposes.
|
||||
# Only the loader knows what is free
|
||||
#ifndef __BIG_KERNEL__
|
||||
.byte 0
|
||||
#else
|
||||
.byte LOADED_HIGH
|
||||
#endif
|
||||
|
||||
setup_move_size: .word 0x8000 # size to move, when setup is not
|
||||
# loaded at 0x90000. We will move setup
|
||||
|
@ -157,11 +150,7 @@ setup_move_size: .word 0x8000 # size to move, when setup is not
|
|||
|
||||
code32_start: # here loaders can put a different
|
||||
# start address for 32-bit code.
|
||||
#ifndef __BIG_KERNEL__
|
||||
.long 0x1000 # 0x1000 = default for zImage
|
||||
#else
|
||||
.long 0x100000 # 0x100000 = default for big kernel
|
||||
#endif
|
||||
|
||||
ramdisk_image: .long 0 # address of loaded ramdisk image
|
||||
# Here the loader puts the 32-bit
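Several of the header fields above (setup_sects, syssize, root_dev) are now emitted as zero and patched into the finished image by tools/build.c, while type_of_loader is left for the boot loader itself. A hedged sketch of what that patching looks like from the image side; the offsets follow the boot protocol layout this header defines, and the helper itself is illustrative rather than code from the tree.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch: poke the fields header.S now leaves as zero.
 * 0x1f1 = setup_sects, 0x1f4 = syssize (16-byte paragraphs),
 * 0x1fc = root_dev, 0x210 = type_of_loader.
 */
static void patch_setup_header(uint8_t *image, uint8_t setup_sectors,
			       uint32_t sys_size, uint16_t root_dev,
			       uint8_t loader_id)
{
	image[0x1f1] = setup_sectors - 1;	/* boot sector not counted */
	memcpy(&image[0x1f4], &sys_size, 4);
	memcpy(&image[0x1fc], &root_dev, 2);
	image[0x210] = loader_id;		/* set by the boot loader */
}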
|
||||
|
|
|
@ -149,11 +149,6 @@ void main(void)
|
|||
/* Query MCA information */
|
||||
query_mca();
|
||||
|
||||
/* Voyager */
|
||||
#ifdef CONFIG_X86_VOYAGER
|
||||
query_voyager();
|
||||
#endif
|
||||
|
||||
/* Query Intel SpeedStep (IST) information */
|
||||
query_ist();
|
||||
|
||||
|
|
|
@ -32,47 +32,6 @@ static void realmode_switch_hook(void)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* A zImage kernel is loaded at 0x10000 but wants to run at 0x1000.
|
||||
* A bzImage kernel is loaded and runs at 0x100000.
|
||||
*/
|
||||
static void move_kernel_around(void)
|
||||
{
|
||||
/* Note: rely on the compile-time option here rather than
|
||||
the LOADED_HIGH flag. The Qemu kernel loader unconditionally
|
||||
sets the loadflags to zero. */
|
||||
#ifndef __BIG_KERNEL__
|
||||
u16 dst_seg, src_seg;
|
||||
u32 syssize;
|
||||
|
||||
dst_seg = 0x1000 >> 4;
|
||||
src_seg = 0x10000 >> 4;
|
||||
syssize = boot_params.hdr.syssize; /* Size in 16-byte paragraphs */
|
||||
|
||||
while (syssize) {
|
||||
int paras = (syssize >= 0x1000) ? 0x1000 : syssize;
|
||||
int dwords = paras << 2;
|
||||
|
||||
asm volatile("pushw %%es ; "
|
||||
"pushw %%ds ; "
|
||||
"movw %1,%%es ; "
|
||||
"movw %2,%%ds ; "
|
||||
"xorw %%di,%%di ; "
|
||||
"xorw %%si,%%si ; "
|
||||
"rep;movsl ; "
|
||||
"popw %%ds ; "
|
||||
"popw %%es"
|
||||
: "+c" (dwords)
|
||||
: "r" (dst_seg), "r" (src_seg)
|
||||
: "esi", "edi");
|
||||
|
||||
syssize -= paras;
|
||||
dst_seg += paras;
|
||||
src_seg += paras;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable all interrupts at the legacy PIC.
|
||||
*/
|
||||
|
@ -147,9 +106,6 @@ void go_to_protected_mode(void)
|
|||
/* Hook before leaving real mode, also disables interrupts */
|
||||
realmode_switch_hook();
|
||||
|
||||
/* Move the kernel/setup to their final resting places */
|
||||
move_kernel_around();
|
||||
|
||||
/* Enable the A20 gate */
|
||||
if (enable_a20()) {
|
||||
puts("A20 gate not responding, unable to boot...\n");
|
||||
|
|
|
@ -15,18 +15,15 @@
|
|||
#include <asm/boot.h>
|
||||
#include <asm/processor-flags.h>
|
||||
#include <asm/segment.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.text
|
||||
|
||||
.globl protected_mode_jump
|
||||
.type protected_mode_jump, @function
|
||||
|
||||
.code16
|
||||
|
||||
/*
|
||||
* void protected_mode_jump(u32 entrypoint, u32 bootparams);
|
||||
*/
|
||||
protected_mode_jump:
|
||||
GLOBAL(protected_mode_jump)
|
||||
movl %edx, %esi # Pointer to boot_params table
|
||||
|
||||
xorl %ebx, %ebx
|
||||
|
@ -47,12 +44,10 @@ protected_mode_jump:
|
|||
.byte 0x66, 0xea # ljmpl opcode
|
||||
2: .long in_pm32 # offset
|
||||
.word __BOOT_CS # segment
|
||||
|
||||
.size protected_mode_jump, .-protected_mode_jump
|
||||
ENDPROC(protected_mode_jump)
|
||||
|
||||
.code32
|
||||
.type in_pm32, @function
|
||||
in_pm32:
|
||||
GLOBAL(in_pm32)
|
||||
# Set up data segments for flat 32-bit mode
|
||||
movl %ecx, %ds
|
||||
movl %ecx, %es
|
||||
|
@ -78,5 +73,4 @@ in_pm32:
|
|||
lldt %cx
|
||||
|
||||
jmpl *%eax # Jump to the 32-bit entrypoint
|
||||
|
||||
.size in_pm32, .-in_pm32
|
||||
ENDPROC(in_pm32)
|
||||
|
|
|
@@ -130,7 +130,7 @@ static void die(const char * str, ...)

static void usage(void)
{
die("Usage: build [-b] setup system [rootdev] [> image]");
die("Usage: build setup system [rootdev] [> image]");
}

int main(int argc, char ** argv)

@@ -145,11 +145,6 @@ int main(int argc, char ** argv)
void *kernel;
u32 crc = 0xffffffffUL;

if (argc > 2 && !strcmp(argv[1], "-b"))
{
is_big_kernel = 1;
argc--, argv++;
}
if ((argc < 3) || (argc > 4))
usage();
if (argc > 3) {

@@ -216,8 +211,6 @@ int main(int argc, char ** argv)
die("Unable to mmap '%s': %m", argv[2]);
/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
sys_size = (sz + 15 + 4) / 16;
if (!is_big_kernel && sys_size > DEF_SYSSIZE)
die("System is too big. Try using bzImage or modules.");

/* Patch the setup code with the appropriate size parameters */
buf[0x1f1] = setup_sectors-1;
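The rounding above sizes the payload in 16-byte paragraphs while reserving room for the 4-byte CRC that build appends to the image; a quick worked example:

/* Worked example of the expression above: a 500000-byte system image plus
 * the 4-byte CRC needs (500000 + 15 + 4) / 16 = 31251 paragraphs, i.e. the
 * payload size rounded up to whole 16-byte units. */
static unsigned int paragraphs_for(unsigned int sz)
{
	return (sz + 15 + 4) / 16;
}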
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
/* -*- linux-c -*- ------------------------------------------------------- *
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
* Copyright 2007 rPath, Inc. - All Rights Reserved
|
||||
*
|
||||
* This file is part of the Linux kernel, and is made available under
|
||||
* the terms of the GNU General Public License version 2.
|
||||
*
|
||||
* ----------------------------------------------------------------------- */
|
||||
|
||||
/*
|
||||
* Get the Voyager config information
|
||||
*/
|
||||
|
||||
#include "boot.h"
|
||||
|
||||
int query_voyager(void)
|
||||
{
|
||||
u8 err;
|
||||
u16 es, di;
|
||||
/* Abuse the apm_bios_info area for this */
|
||||
u8 *data_ptr = (u8 *)&boot_params.apm_bios_info;
|
||||
|
||||
data_ptr[0] = 0xff; /* Flag on config not found(?) */
|
||||
|
||||
asm("pushw %%es ; "
|
||||
"int $0x15 ; "
|
||||
"setc %0 ; "
|
||||
"movw %%es, %1 ; "
|
||||
"popw %%es"
|
||||
: "=q" (err), "=r" (es), "=D" (di)
|
||||
: "a" (0xffc0));
|
||||
|
||||
if (err)
|
||||
return -1; /* Not Voyager */
|
||||
|
||||
set_fs(es);
|
||||
copy_from_fs(data_ptr, di, 7); /* Table is 7 bytes apparently */
|
||||
return 0;
|
||||
}
|
Diff not shown for two files because of their large size.
|
@ -33,8 +33,6 @@
|
|||
#include <asm/sigframe.h>
|
||||
#include <asm/sys_ia32.h>
|
||||
|
||||
#define DEBUG_SIG 0
|
||||
|
||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
|
||||
|
||||
#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
|
||||
|
@ -46,78 +44,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
|
|||
|
||||
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
/* If you change siginfo_t structure, please make sure that
|
||||
this code is fixed accordingly.
|
||||
It should never copy any pad contained in the structure
|
||||
to avoid security leaks, but must copy the generic
|
||||
3 ints plus the relevant union member. */
|
||||
err = __put_user(from->si_signo, &to->si_signo);
|
||||
err |= __put_user(from->si_errno, &to->si_errno);
|
||||
err |= __put_user((short)from->si_code, &to->si_code);
|
||||
put_user_try {
|
||||
/* If you change siginfo_t structure, please make sure that
|
||||
this code is fixed accordingly.
|
||||
It should never copy any pad contained in the structure
|
||||
to avoid security leaks, but must copy the generic
|
||||
3 ints plus the relevant union member. */
|
||||
put_user_ex(from->si_signo, &to->si_signo);
|
||||
put_user_ex(from->si_errno, &to->si_errno);
|
||||
put_user_ex((short)from->si_code, &to->si_code);
|
||||
|
||||
if (from->si_code < 0) {
|
||||
err |= __put_user(from->si_pid, &to->si_pid);
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
|
||||
} else {
|
||||
/*
|
||||
* First 32bits of unions are always present:
|
||||
* si_pid === si_band === si_tid === si_addr(LS half)
|
||||
*/
|
||||
err |= __put_user(from->_sifields._pad[0],
|
||||
&to->_sifields._pad[0]);
|
||||
switch (from->si_code >> 16) {
|
||||
case __SI_FAULT >> 16:
|
||||
break;
|
||||
case __SI_CHLD >> 16:
|
||||
err |= __put_user(from->si_utime, &to->si_utime);
|
||||
err |= __put_user(from->si_stime, &to->si_stime);
|
||||
err |= __put_user(from->si_status, &to->si_status);
|
||||
/* FALL THROUGH */
|
||||
default:
|
||||
case __SI_KILL >> 16:
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
break;
|
||||
case __SI_POLL >> 16:
|
||||
err |= __put_user(from->si_fd, &to->si_fd);
|
||||
break;
|
||||
case __SI_TIMER >> 16:
|
||||
err |= __put_user(from->si_overrun, &to->si_overrun);
|
||||
err |= __put_user(ptr_to_compat(from->si_ptr),
|
||||
&to->si_ptr);
|
||||
break;
|
||||
/* This is not generated by the kernel as of now. */
|
||||
case __SI_RT >> 16:
|
||||
case __SI_MESGQ >> 16:
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
err |= __put_user(from->si_int, &to->si_int);
|
||||
break;
|
||||
if (from->si_code < 0) {
|
||||
put_user_ex(from->si_pid, &to->si_pid);
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
|
||||
} else {
|
||||
/*
|
||||
* First 32bits of unions are always present:
|
||||
* si_pid === si_band === si_tid === si_addr(LS half)
|
||||
*/
|
||||
put_user_ex(from->_sifields._pad[0],
|
||||
&to->_sifields._pad[0]);
|
||||
switch (from->si_code >> 16) {
|
||||
case __SI_FAULT >> 16:
|
||||
break;
|
||||
case __SI_CHLD >> 16:
|
||||
put_user_ex(from->si_utime, &to->si_utime);
|
||||
put_user_ex(from->si_stime, &to->si_stime);
|
||||
put_user_ex(from->si_status, &to->si_status);
|
||||
/* FALL THROUGH */
|
||||
default:
|
||||
case __SI_KILL >> 16:
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
break;
|
||||
case __SI_POLL >> 16:
|
||||
put_user_ex(from->si_fd, &to->si_fd);
|
||||
break;
|
||||
case __SI_TIMER >> 16:
|
||||
put_user_ex(from->si_overrun, &to->si_overrun);
|
||||
put_user_ex(ptr_to_compat(from->si_ptr),
|
||||
&to->si_ptr);
|
||||
break;
|
||||
/* This is not generated by the kernel as of now. */
|
||||
case __SI_RT >> 16:
|
||||
case __SI_MESGQ >> 16:
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
put_user_ex(from->si_int, &to->si_int);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} put_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
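The conversion above replaces a chain of err |= __put_user() calls with a single exception-handled block. The same pattern in isolation, as a minimal sketch; struct compat_example and its fields are made up for the illustration.

/*
 * Minimal sketch of the put_user_try/put_user_ex/put_user_catch pattern
 * introduced above: each put_user_ex() is an unchecked store whose fault
 * is routed to the catch clause, so the error flag is written only once.
 * The struct and field names are illustrative.
 */
static int copy_two_fields(struct compat_example __user *to, int a, int b)
{
	int err = 0;

	if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
		return -EFAULT;

	put_user_try {
		put_user_ex(a, &to->first);
		put_user_ex(b, &to->second);
	} put_user_catch(err);

	return err;
}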
|
||||
|
||||
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
u32 ptr32;
|
||||
|
||||
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
err = __get_user(to->si_signo, &from->si_signo);
|
||||
err |= __get_user(to->si_errno, &from->si_errno);
|
||||
err |= __get_user(to->si_code, &from->si_code);
|
||||
get_user_try {
|
||||
get_user_ex(to->si_signo, &from->si_signo);
|
||||
get_user_ex(to->si_errno, &from->si_errno);
|
||||
get_user_ex(to->si_code, &from->si_code);
|
||||
|
||||
err |= __get_user(to->si_pid, &from->si_pid);
|
||||
err |= __get_user(to->si_uid, &from->si_uid);
|
||||
err |= __get_user(ptr32, &from->si_ptr);
|
||||
to->si_ptr = compat_ptr(ptr32);
|
||||
get_user_ex(to->si_pid, &from->si_pid);
|
||||
get_user_ex(to->si_uid, &from->si_uid);
|
||||
get_user_ex(ptr32, &from->si_ptr);
|
||||
to->si_ptr = compat_ptr(ptr32);
|
||||
} get_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -142,17 +145,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
struct pt_regs *regs)
|
||||
{
|
||||
stack_t uss, uoss;
|
||||
int ret;
|
||||
int ret, err = 0;
|
||||
mm_segment_t seg;
|
||||
|
||||
if (uss_ptr) {
|
||||
u32 ptr;
|
||||
|
||||
memset(&uss, 0, sizeof(stack_t));
|
||||
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
|
||||
__get_user(ptr, &uss_ptr->ss_sp) ||
|
||||
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
|
||||
__get_user(uss.ss_size, &uss_ptr->ss_size))
|
||||
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
|
||||
return -EFAULT;
|
||||
|
||||
get_user_try {
|
||||
get_user_ex(ptr, &uss_ptr->ss_sp);
|
||||
get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
|
||||
get_user_ex(uss.ss_size, &uss_ptr->ss_size);
|
||||
} get_user_catch(err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
uss.ss_sp = compat_ptr(ptr);
|
||||
}
|
||||
|
@ -161,10 +170,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
|
||||
set_fs(seg);
|
||||
if (ret >= 0 && uoss_ptr) {
|
||||
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
|
||||
__put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
|
||||
__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
|
||||
__put_user(uoss.ss_size, &uoss_ptr->ss_size))
|
||||
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
|
||||
return -EFAULT;
|
||||
|
||||
put_user_try {
|
||||
put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
|
||||
put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
|
||||
put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
|
||||
} put_user_catch(err);
|
||||
|
||||
if (err)
|
||||
ret = -EFAULT;
|
||||
}
|
||||
return ret;
|
||||
|
@ -173,75 +188,78 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
/*
|
||||
* Do a signal return; undo the signal stack.
|
||||
*/
|
||||
#define loadsegment_gs(v) load_gs_index(v)
|
||||
#define loadsegment_fs(v) loadsegment(fs, v)
|
||||
#define loadsegment_ds(v) loadsegment(ds, v)
|
||||
#define loadsegment_es(v) loadsegment(es, v)
|
||||
|
||||
#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; })
|
||||
#define set_user_seg(seg, v) loadsegment_##seg(v)
|
||||
|
||||
#define COPY(x) { \
|
||||
err |= __get_user(regs->x, &sc->x); \
|
||||
get_user_ex(regs->x, &sc->x); \
|
||||
}
|
||||
|
||||
#define COPY_SEG_CPL3(seg) { \
|
||||
unsigned short tmp; \
|
||||
err |= __get_user(tmp, &sc->seg); \
|
||||
regs->seg = tmp | 3; \
|
||||
}
|
||||
#define GET_SEG(seg) ({ \
|
||||
unsigned short tmp; \
|
||||
get_user_ex(tmp, &sc->seg); \
|
||||
tmp; \
|
||||
})
|
||||
|
||||
#define COPY_SEG_CPL3(seg) do { \
|
||||
regs->seg = GET_SEG(seg) | 3; \
|
||||
} while (0)
|
||||
|
||||
#define RELOAD_SEG(seg) { \
|
||||
unsigned int cur, pre; \
|
||||
err |= __get_user(pre, &sc->seg); \
|
||||
savesegment(seg, cur); \
|
||||
unsigned int pre = GET_SEG(seg); \
|
||||
unsigned int cur = get_user_seg(seg); \
|
||||
pre |= 3; \
|
||||
if (pre != cur) \
|
||||
loadsegment(seg, pre); \
|
||||
set_user_seg(seg, pre); \
|
||||
}
|
||||
|
||||
static int ia32_restore_sigcontext(struct pt_regs *regs,
|
||||
struct sigcontext_ia32 __user *sc,
|
||||
unsigned int *pax)
|
||||
{
|
||||
unsigned int tmpflags, gs, oldgs, err = 0;
|
||||
unsigned int tmpflags, err = 0;
|
||||
void __user *buf;
|
||||
u32 tmp;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
#if DEBUG_SIG
|
||||
printk(KERN_DEBUG "SIG restore_sigcontext: "
|
||||
"sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
|
||||
sc, sc->err, sc->ip, sc->cs, sc->flags);
|
||||
#endif
|
||||
get_user_try {
|
||||
/*
|
||||
* Reload fs and gs if they have changed in the signal
|
||||
* handler. This does not handle long fs/gs base changes in
|
||||
* the handler, but does not clobber them at least in the
|
||||
* normal case.
|
||||
*/
|
||||
RELOAD_SEG(gs);
|
||||
RELOAD_SEG(fs);
|
||||
RELOAD_SEG(ds);
|
||||
RELOAD_SEG(es);
|
||||
|
||||
/*
|
||||
* Reload fs and gs if they have changed in the signal
|
||||
* handler. This does not handle long fs/gs base changes in
|
||||
* the handler, but does not clobber them at least in the
|
||||
* normal case.
|
||||
*/
|
||||
err |= __get_user(gs, &sc->gs);
|
||||
gs |= 3;
|
||||
savesegment(gs, oldgs);
|
||||
if (gs != oldgs)
|
||||
load_gs_index(gs);
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
/* Don't touch extended registers */
|
||||
|
||||
RELOAD_SEG(fs);
|
||||
RELOAD_SEG(ds);
|
||||
RELOAD_SEG(es);
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
/* Don't touch extended registers */
|
||||
get_user_ex(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
/* disable syscall checks */
|
||||
regs->orig_ax = -1;
|
||||
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
get_user_ex(tmp, &sc->fpstate);
|
||||
buf = compat_ptr(tmp);
|
||||
err |= restore_i387_xstate_ia32(buf);
|
||||
|
||||
err |= __get_user(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
/* disable syscall checks */
|
||||
regs->orig_ax = -1;
|
||||
get_user_ex(*pax, &sc->ax);
|
||||
} get_user_catch(err);
|
||||
|
||||
err |= __get_user(tmp, &sc->fpstate);
|
||||
buf = compat_ptr(tmp);
|
||||
err |= restore_i387_xstate_ia32(buf);
|
||||
|
||||
err |= __get_user(*pax, &sc->ax);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -317,38 +335,36 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
|
|||
void __user *fpstate,
|
||||
struct pt_regs *regs, unsigned int mask)
|
||||
{
|
||||
int tmp, err = 0;
|
||||
int err = 0;
|
||||
|
||||
savesegment(gs, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
|
||||
savesegment(fs, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
|
||||
savesegment(ds, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
|
||||
savesegment(es, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
|
||||
put_user_try {
|
||||
put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs);
|
||||
put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs);
|
||||
put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds);
|
||||
put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es);
|
||||
|
||||
err |= __put_user(regs->di, &sc->di);
|
||||
err |= __put_user(regs->si, &sc->si);
|
||||
err |= __put_user(regs->bp, &sc->bp);
|
||||
err |= __put_user(regs->sp, &sc->sp);
|
||||
err |= __put_user(regs->bx, &sc->bx);
|
||||
err |= __put_user(regs->dx, &sc->dx);
|
||||
err |= __put_user(regs->cx, &sc->cx);
|
||||
err |= __put_user(regs->ax, &sc->ax);
|
||||
err |= __put_user(current->thread.trap_no, &sc->trapno);
|
||||
err |= __put_user(current->thread.error_code, &sc->err);
|
||||
err |= __put_user(regs->ip, &sc->ip);
|
||||
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
err |= __put_user(regs->flags, &sc->flags);
|
||||
err |= __put_user(regs->sp, &sc->sp_at_signal);
|
||||
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
put_user_ex(regs->di, &sc->di);
|
||||
put_user_ex(regs->si, &sc->si);
|
||||
put_user_ex(regs->bp, &sc->bp);
|
||||
put_user_ex(regs->sp, &sc->sp);
|
||||
put_user_ex(regs->bx, &sc->bx);
|
||||
put_user_ex(regs->dx, &sc->dx);
|
||||
put_user_ex(regs->cx, &sc->cx);
|
||||
put_user_ex(regs->ax, &sc->ax);
|
||||
put_user_ex(current->thread.trap_no, &sc->trapno);
|
||||
put_user_ex(current->thread.error_code, &sc->err);
|
||||
put_user_ex(regs->ip, &sc->ip);
|
||||
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
put_user_ex(regs->flags, &sc->flags);
|
||||
put_user_ex(regs->sp, &sc->sp_at_signal);
|
||||
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
|
||||
err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
|
||||
put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
|
||||
|
||||
/* non-iBCS2 extensions.. */
|
||||
err |= __put_user(mask, &sc->oldmask);
|
||||
err |= __put_user(current->thread.cr2, &sc->cr2);
|
||||
/* non-iBCS2 extensions.. */
|
||||
put_user_ex(mask, &sc->oldmask);
|
||||
put_user_ex(current->thread.cr2, &sc->cr2);
|
||||
} put_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -437,13 +453,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
|||
else
|
||||
restorer = &frame->retcode;
|
||||
}
|
||||
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* These are actually not used anymore, but left because some
|
||||
* gdb versions depend on them as a marker.
|
||||
*/
|
||||
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
put_user_try {
|
||||
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* These are actually not used anymore, but left because some
|
||||
* gdb versions depend on them as a marker.
|
||||
*/
|
||||
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
} put_user_catch(err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -462,11 +482,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
|||
regs->cs = __USER32_CS;
|
||||
regs->ss = __USER32_DS;
|
||||
|
||||
#if DEBUG_SIG
|
||||
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
|
||||
current->comm, current->pid, frame, regs->ip, frame->pretcode);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -496,41 +511,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||
return -EFAULT;
|
||||
|
||||
err |= __put_user(sig, &frame->sig);
|
||||
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
|
||||
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
|
||||
err |= copy_siginfo_to_user32(&frame->info, info);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
put_user_try {
|
||||
put_user_ex(sig, &frame->sig);
|
||||
put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
|
||||
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
|
||||
err |= copy_siginfo_to_user32(&frame->info, info);
|
||||
|
||||
/* Create the ucontext. */
|
||||
if (cpu_has_xsave)
|
||||
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||
else
|
||||
err |= __put_user(0, &frame->uc.uc_flags);
|
||||
err |= __put_user(0, &frame->uc.uc_link);
|
||||
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
||||
err |= __put_user(sas_ss_flags(regs->sp),
|
||||
&frame->uc.uc_stack.ss_flags);
|
||||
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
|
||||
regs, set->sig[0]);
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
/* Create the ucontext. */
|
||||
if (cpu_has_xsave)
|
||||
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||
else
|
||||
put_user_ex(0, &frame->uc.uc_flags);
|
||||
put_user_ex(0, &frame->uc.uc_link);
|
||||
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
||||
put_user_ex(sas_ss_flags(regs->sp),
|
||||
&frame->uc.uc_stack.ss_flags);
|
||||
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
|
||||
regs, set->sig[0]);
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
|
||||
if (ka->sa.sa_flags & SA_RESTORER)
|
||||
restorer = ka->sa.sa_restorer;
|
||||
else
|
||||
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
|
||||
rt_sigreturn);
|
||||
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
|
||||
if (ka->sa.sa_flags & SA_RESTORER)
|
||||
restorer = ka->sa.sa_restorer;
|
||||
else
|
||||
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
|
||||
rt_sigreturn);
|
||||
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* Not actually used anymore, but left because some gdb
|
||||
* versions need it.
|
||||
*/
|
||||
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
} put_user_catch(err);
|
||||
|
||||
/*
|
||||
* Not actually used anymore, but left because some gdb
|
||||
* versions need it.
|
||||
*/
|
||||
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -549,10 +563,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
regs->cs = __USER32_CS;
|
||||
regs->ss = __USER32_DS;
|
||||
|
||||
#if DEBUG_SIG
|
||||
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
|
||||
current->comm, current->pid, frame, regs->ip, frame->pretcode);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
|
|||
CFI_DEF_CFA rsp,0
|
||||
CFI_REGISTER rsp,rbp
|
||||
SWAPGS_UNSAFE_STACK
|
||||
movq %gs:pda_kernelstack, %rsp
|
||||
addq $(PDA_STACKOFFSET),%rsp
|
||||
movq PER_CPU_VAR(kernel_stack), %rsp
|
||||
addq $(KERNEL_STACK_OFFSET),%rsp
|
||||
/*
|
||||
* No need to follow this irqs on/off section: the syscall
|
||||
* disabled irqs, here we enable it straight after entry:
|
||||
|
@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
|
|||
ENTRY(ia32_cstar_target)
|
||||
CFI_STARTPROC32 simple
|
||||
CFI_SIGNAL_FRAME
|
||||
CFI_DEF_CFA rsp,PDA_STACKOFFSET
|
||||
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
|
||||
CFI_REGISTER rip,rcx
|
||||
/*CFI_REGISTER rflags,r11*/
|
||||
SWAPGS_UNSAFE_STACK
|
||||
movl %esp,%r8d
|
||||
CFI_REGISTER rsp,r8
|
||||
movq %gs:pda_kernelstack,%rsp
|
||||
movq PER_CPU_VAR(kernel_stack),%rsp
|
||||
/*
|
||||
* No need to follow this irqs on/off section: the syscall
|
||||
* disabled irqs and here we enable it straight after entry:
|
||||
|
|
|
@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
|
|||
dump->regs.ds = (u16)regs->ds;
|
||||
dump->regs.es = (u16)regs->es;
|
||||
dump->regs.fs = (u16)regs->fs;
|
||||
savesegment(gs, dump->regs.gs);
|
||||
dump->regs.gs = get_user_gs(regs);
|
||||
dump->regs.orig_ax = regs->orig_ax;
|
||||
dump->regs.ip = regs->ip;
|
||||
dump->regs.cs = (u16)regs->cs;
|
||||
|
|
|
@ -102,9 +102,6 @@ static inline void disable_acpi(void)
|
|||
acpi_noirq = 1;
|
||||
}
|
||||
|
||||
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
|
||||
#define FIX_ACPI_PAGES 4
|
||||
|
||||
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
|
||||
|
||||
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
|
||||
|
|
|
@ -1,15 +1,18 @@
|
|||
#ifndef _ASM_X86_APIC_H
|
||||
#define _ASM_X86_APIC_H
|
||||
|
||||
#include <linux/pm.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/pm.h>
|
||||
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/apicdef.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/apicdef.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/mpspec.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/msr.h>
|
||||
|
||||
#define ARCH_APICTIMER_STOPS_ON_C3 1
|
||||
|
@ -33,7 +36,13 @@
|
|||
} while (0)
|
||||
|
||||
|
||||
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
|
||||
extern void generic_apic_probe(void);
|
||||
#else
|
||||
static inline void generic_apic_probe(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
|
||||
|
@ -41,6 +50,21 @@ extern unsigned int apic_verbosity;
|
|||
extern int local_apic_timer_c2_ok;
|
||||
|
||||
extern int disable_apic;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
extern void __inquire_remote_apic(int apicid);
|
||||
#else /* CONFIG_SMP */
|
||||
static inline void __inquire_remote_apic(int apicid)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static inline void default_inquire_remote_apic(int apicid)
|
||||
{
|
||||
if (apic_verbosity >= APIC_DEBUG)
|
||||
__inquire_remote_apic(apicid);
|
||||
}
|
||||
|
||||
/*
|
||||
* Basic functions accessing APICs.
|
||||
*/
|
||||
|
@ -51,7 +75,14 @@ extern int disable_apic;
|
|||
#define setup_secondary_clock setup_secondary_APIC_clock
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_VSMP
|
||||
extern int is_vsmp_box(void);
|
||||
#else
|
||||
static inline int is_vsmp_box(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
extern void xapic_wait_icr_idle(void);
|
||||
extern u32 safe_xapic_wait_icr_idle(void);
|
||||
extern void xapic_icr_write(u32, u32);
|
||||
|
@ -71,6 +102,12 @@ static inline u32 native_apic_mem_read(u32 reg)
|
|||
return *((volatile u32 *)(APIC_BASE + reg));
|
||||
}
|
||||
|
||||
extern void native_apic_wait_icr_idle(void);
|
||||
extern u32 native_safe_apic_wait_icr_idle(void);
|
||||
extern void native_apic_icr_write(u32 low, u32 id);
|
||||
extern u64 native_apic_icr_read(void);
|
||||
|
||||
#ifdef CONFIG_X86_X2APIC
|
||||
static inline void native_apic_msr_write(u32 reg, u32 v)
|
||||
{
|
||||
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
|
||||
|
@ -91,8 +128,32 @@ static inline u32 native_apic_msr_read(u32 reg)
|
|||
return low;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_X86_32
|
||||
extern int x2apic;
|
||||
static inline void native_x2apic_wait_icr_idle(void)
|
||||
{
|
||||
/* no need to wait for icr idle in x2apic */
|
||||
return;
|
||||
}
|
||||
|
||||
static inline u32 native_safe_x2apic_wait_icr_idle(void)
|
||||
{
|
||||
/* no need to wait for icr idle in x2apic */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;

rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
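In x2APIC mode the ICR is a single 64-bit MSR, so the destination APIC ID rides in the upper half of the value; a small example of the packing used above, with an illustrative low word:

/*
 * Example: destination APIC ID 5 combined with a low word of 0x000040fd
 * gives the single MSR value 0x00000005000040fd that
 * native_x2apic_icr_write() would write.
 */
static inline unsigned long long pack_x2apic_icr(unsigned int id,
						 unsigned int low)
{
	return ((unsigned long long)id << 32) | low;
}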
|
||||
|
||||
extern int x2apic, x2apic_phys;
|
||||
extern void check_x2apic(void);
|
||||
extern void enable_x2apic(void);
|
||||
extern void enable_IR_x2apic(void);
|
||||
|
@ -110,30 +171,24 @@ static inline int x2apic_enabled(void)
|
|||
return 0;
|
||||
}
|
||||
#else
|
||||
#define x2apic_enabled() 0
|
||||
static inline void check_x2apic(void)
|
||||
{
|
||||
}
|
||||
static inline void enable_x2apic(void)
|
||||
{
|
||||
}
|
||||
static inline void enable_IR_x2apic(void)
|
||||
{
|
||||
}
|
||||
static inline int x2apic_enabled(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct apic_ops {
|
||||
u32 (*read)(u32 reg);
|
||||
void (*write)(u32 reg, u32 v);
|
||||
u64 (*icr_read)(void);
|
||||
void (*icr_write)(u32 low, u32 high);
|
||||
void (*wait_icr_idle)(void);
|
||||
u32 (*safe_wait_icr_idle)(void);
|
||||
};
|
||||
|
||||
extern struct apic_ops *apic_ops;
|
||||
|
||||
#define apic_read (apic_ops->read)
|
||||
#define apic_write (apic_ops->write)
|
||||
#define apic_icr_read (apic_ops->icr_read)
|
||||
#define apic_icr_write (apic_ops->icr_write)
|
||||
#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
|
||||
#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
|
||||
|
||||
extern int get_physical_broadcast(void);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#ifdef CONFIG_X86_X2APIC
|
||||
static inline void ack_x2APIC_irq(void)
|
||||
{
|
||||
/* Docs say use 0 for future compatibility */
|
||||
|
@ -141,18 +196,6 @@ static inline void ack_x2APIC_irq(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
static inline void ack_APIC_irq(void)
|
||||
{
|
||||
/*
|
||||
* ack_APIC_irq() actually gets compiled as a single instruction
|
||||
* ... yummie.
|
||||
*/
|
||||
|
||||
/* Docs say use 0 for future compatibility */
|
||||
apic_write(APIC_EOI, 0);
|
||||
}
|
||||
|
||||
extern int lapic_get_maxlvt(void);
|
||||
extern void clear_local_APIC(void);
|
||||
extern void connect_bsp_APIC(void);
|
||||
|
@ -196,4 +239,329 @@ static inline void disable_local_APIC(void) { }
|
|||
|
||||
#endif /* !CONFIG_X86_LOCAL_APIC */
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define SET_APIC_ID(x) (apic->set_apic_id(x))
|
||||
#else
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Copyright 2004 James Cleverdon, IBM.
|
||||
* Subject to the GNU Public License, v.2
|
||||
*
|
||||
* Generic APIC sub-arch data struct.
|
||||
*
|
||||
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
|
||||
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
|
||||
* James Cleverdon.
|
||||
*/
|
||||
struct apic {
|
||||
char *name;
|
||||
|
||||
int (*probe)(void);
|
||||
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
|
||||
int (*apic_id_registered)(void);
|
||||
|
||||
u32 irq_delivery_mode;
|
||||
u32 irq_dest_mode;
|
||||
|
||||
const struct cpumask *(*target_cpus)(void);
|
||||
|
||||
int disable_esr;
|
||||
|
||||
int dest_logical;
|
||||
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
|
||||
unsigned long (*check_apicid_present)(int apicid);
|
||||
|
||||
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
|
||||
void (*init_apic_ldr)(void);
|
||||
|
||||
physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
|
||||
|
||||
void (*setup_apic_routing)(void);
|
||||
int (*multi_timer_check)(int apic, int irq);
|
||||
int (*apicid_to_node)(int logical_apicid);
|
||||
int (*cpu_to_logical_apicid)(int cpu);
|
||||
int (*cpu_present_to_apicid)(int mps_cpu);
|
||||
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
|
||||
void (*setup_portio_remap)(void);
|
||||
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
|
||||
void (*enable_apic_mode)(void);
|
||||
int (*phys_pkg_id)(int cpuid_apic, int index_msb);
|
||||
|
||||
/*
|
||||
* When one of the next two hooks returns 1 the apic
|
||||
* is switched to this. Essentially they are additional
|
||||
* probe functions:
|
||||
*/
|
||||
int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
|
||||
|
||||
unsigned int (*get_apic_id)(unsigned long x);
|
||||
unsigned long (*set_apic_id)(unsigned int id);
|
||||
unsigned long apic_id_mask;
|
||||
|
||||
unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
|
||||
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
|
||||
const struct cpumask *andmask);
|
||||
|
||||
/* ipi */
|
||||
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
|
||||
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
|
||||
int vector);
|
||||
void (*send_IPI_allbutself)(int vector);
|
||||
void (*send_IPI_all)(int vector);
|
||||
void (*send_IPI_self)(int vector);
|
||||
|
||||
/* wakeup_secondary_cpu */
|
||||
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
|
||||
|
||||
int trampoline_phys_low;
|
||||
int trampoline_phys_high;
|
||||
|
||||
void (*wait_for_init_deassert)(atomic_t *deassert);
|
||||
void (*smp_callin_clear_local_apic)(void);
|
||||
void (*inquire_remote_apic)(int apicid);
|
||||
|
||||
/* apic ops */
|
||||
u32 (*read)(u32 reg);
|
||||
void (*write)(u32 reg, u32 v);
|
||||
u64 (*icr_read)(void);
|
||||
void (*icr_write)(u32 low, u32 high);
|
||||
void (*wait_icr_idle)(void);
|
||||
u32 (*safe_wait_icr_idle)(void);
|
||||
};
|
||||
|
||||
/*
|
||||
* Pointer to the local APIC driver in use on this system (there's
|
||||
* always just one such driver in use - the kernel decides via an
|
||||
* early probing process which one it picks - and then sticks to it):
|
||||
*/
|
||||
extern struct apic *apic;
|
||||
|
||||
/*
|
||||
* APIC functionality to boot other CPUs - only used on SMP:
|
||||
*/
|
||||
#ifdef CONFIG_SMP
|
||||
extern atomic_t init_deasserted;
|
||||
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
|
||||
#endif
|
||||
|
||||
static inline u32 apic_read(u32 reg)
|
||||
{
|
||||
return apic->read(reg);
|
||||
}
|
||||
|
||||
static inline void apic_write(u32 reg, u32 val)
|
||||
{
|
||||
apic->write(reg, val);
|
||||
}
|
||||
|
||||
static inline u64 apic_icr_read(void)
|
||||
{
|
||||
return apic->icr_read();
|
||||
}
|
||||
|
||||
static inline void apic_icr_write(u32 low, u32 high)
|
||||
{
|
||||
apic->icr_write(low, high);
|
||||
}
|
||||
|
||||
static inline void apic_wait_icr_idle(void)
|
||||
{
|
||||
apic->wait_icr_idle();
|
||||
}
|
||||
|
||||
static inline u32 safe_apic_wait_icr_idle(void)
|
||||
{
|
||||
return apic->safe_wait_icr_idle();
|
||||
}
|
||||
|
||||
|
||||
static inline void ack_APIC_irq(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/

/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
#endif
}

static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));

if (APIC_XAPIC(ver))
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}
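A quick example of the extraction above: the physical APIC ID lives in bits 31-24 of the ID register, and only four of those bits are valid on pre-xAPIC parts, which is why the narrower mask exists.

/* Example: a raw APIC_ID register value of 0x03000000 yields ID 3 with
 * the 8-bit xAPIC mask, and also 3 after the 4-bit mask on older parts. */
static inline unsigned int xapic_id_from_raw(unsigned long raw)
{
	return (raw >> 24) & 0xFF;
}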
|
||||
|
||||
/*
|
||||
* Warm reset vector default position:
|
||||
*/
|
||||
#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
|
||||
#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
extern struct apic apic_flat;
|
||||
extern struct apic apic_physflat;
|
||||
extern struct apic apic_x2apic_cluster;
|
||||
extern struct apic apic_x2apic_phys;
|
||||
extern int default_acpi_madt_oem_check(char *, char *);
|
||||
|
||||
extern void apic_send_IPI_self(int vector);
|
||||
|
||||
extern struct apic apic_x2apic_uv_x;
|
||||
DECLARE_PER_CPU(int, x2apic_extra_bits);
|
||||
|
||||
extern int default_cpu_present_to_apicid(int mps_cpu);
|
||||
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
|
||||
#endif
|
||||
|
||||
static inline void default_wait_for_init_deassert(atomic_t *deassert)
|
||||
{
|
||||
while (!atomic_read(deassert))
|
||||
cpu_relax();
|
||||
return;
|
||||
}
|
||||
|
||||
extern void generic_bigsmp_probe(void);
|
||||
|
||||
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
|
||||
#include <asm/smp.h>
|
||||
|
||||
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
|
||||
|
||||
static inline const struct cpumask *default_target_cpus(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
return cpu_online_mask;
|
||||
#else
|
||||
return cpumask_of(0);
|
||||
#endif
|
||||
}
|
||||
|
||||
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
|
||||
|
||||
|
||||
static inline unsigned int read_apic_id(void)
|
||||
{
|
||||
unsigned int reg;
|
||||
|
||||
reg = apic_read(APIC_ID);
|
||||
|
||||
return apic->get_apic_id(reg);
|
||||
}
|
||||
|
||||
extern void default_setup_apic_routing(void);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Set up the logical destination ID.
|
||||
*
|
||||
* Intel recommends to set DFR, LDR and TPR before enabling
|
||||
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
|
||||
* document number 292116). So here it goes...
|
||||
*/
|
||||
extern void default_init_apic_ldr(void);
|
||||
|
||||
static inline int default_apic_id_registered(void)
|
||||
{
|
||||
return physid_isset(read_apic_id(), phys_cpu_present_map);
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
{
|
||||
return cpumask_bits(cpumask)[0];
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
|
||||
const struct cpumask *andmask)
|
||||
{
|
||||
unsigned long mask1 = cpumask_bits(cpumask)[0];
|
||||
unsigned long mask2 = cpumask_bits(andmask)[0];
|
||||
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
|
||||
|
||||
return (unsigned int)(mask1 & mask2 & mask3);
|
||||
}
|
||||
|
||||
static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
|
||||
{
|
||||
return cpuid_apic >> index_msb;
|
||||
}
|
||||
|
||||
extern int default_apicid_to_node(int logical_apicid);
|
||||
|
||||
#endif
|
||||
|
||||
static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
|
||||
{
|
||||
return physid_isset(apicid, bitmap);
|
||||
}
|
||||
|
||||
static inline unsigned long default_check_apicid_present(int bit)
|
||||
{
|
||||
return physid_isset(bit, phys_cpu_present_map);
|
||||
}
|
||||
|
||||
static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
|
||||
{
|
||||
return phys_map;
|
||||
}
|
||||
|
||||
/* Mapping from cpu number to logical apicid */
|
||||
static inline int default_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
return 1 << cpu;
|
||||
}
|
||||
|
||||
static inline int __default_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
|
||||
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
|
||||
else
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
||||
static inline int
|
||||
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
|
||||
{
|
||||
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
static inline int default_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
return __default_cpu_present_to_apicid(mps_cpu);
|
||||
}
|
||||
|
||||
static inline int
|
||||
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
|
||||
{
|
||||
return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
|
||||
}
|
||||
#else
|
||||
extern int default_cpu_present_to_apicid(int mps_cpu);
|
||||
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
|
||||
#endif
|
||||
|
||||
static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
|
||||
{
|
||||
return physid_mask_of_physid(phys_apicid);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_X86_LOCAL_APIC */
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
extern u8 cpu_2_logical_apicid[NR_CPUS];
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_APIC_H */
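
The struct above is the whole per-driver interface: the kernel probes once, points the global apic pointer at the winning driver, and every caller goes through the thin inline wrappers. A standalone C sketch of that same dispatch pattern is shown below; the mock_* names and register numbers are made up for illustration and are not the kernel's types.

/* Standalone userspace sketch (not kernel code): one driver structure,
 * one global pointer, thin inline wrappers - the same shape as
 * struct apic and apic_read()/apic_write() above. */
#include <stdio.h>
#include <stdint.h>

struct mock_apic {
	const char *name;
	uint32_t (*read)(uint32_t reg);
	void (*write)(uint32_t reg, uint32_t v);
};

static uint32_t mock_regs[256];

static uint32_t flat_read(uint32_t reg)          { return mock_regs[reg & 0xff]; }
static void flat_write(uint32_t reg, uint32_t v) { mock_regs[reg & 0xff] = v; }

/* The "probed" driver; the kernel picks one early and sticks to it. */
static struct mock_apic flat_driver = { "mock-flat", flat_read, flat_write };
static struct mock_apic *apic = &flat_driver;

/* Wrappers mirror apic_read()/apic_write(): callers never see the driver. */
static inline uint32_t apic_read(uint32_t reg)          { return apic->read(reg); }
static inline void apic_write(uint32_t reg, uint32_t v) { apic->write(reg, v); }

int main(void)
{
	apic_write(0x20, 0x01000000);        /* pretend this is an ID register */
	printf("%s: ID reg = %#x\n", apic->name, apic_read(0x20));
	return 0;
}

The indirection is what lets the drivers declared above (apic_flat, apic_physflat, the x2apic variants, apic_x2apic_uv_x) coexist in one kernel image, with the choice made once at boot.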

@@ -53,6 +53,7 @@
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_LVTCMCI 0x2f0
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000

@@ -0,0 +1,12 @@
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H

/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif

#endif /* _ASM_X86_APICNUM_H */

@@ -1,26 +0,0 @@
#ifndef _ASM_X86_ARCH_HOOKS_H
#define _ASM_X86_ARCH_HOOKS_H

#include <linux/interrupt.h>

/*
 * linux/include/asm/arch_hooks.h
 *
 * define the architecture specific hooks
 */

/* these aren't arch hooks, they are generic routines
 * that can be used by the hooks */
extern void init_ISA_irqs(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);

/* these are the defined hooks */
extern void intr_init_hook(void);
extern void pre_intr_init_hook(void);
extern void pre_setup_arch_hook(void);
extern void trap_init_hook(void);
extern void pre_time_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);

#endif /* _ASM_X86_ARCH_HOOKS_H */

@@ -1,155 +0,0 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H

#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)

static inline int apic_id_registered(void)
{
	return (1);
}

static inline const cpumask_t *target_cpus(void)
{
#ifdef CONFIG_SMP
	return &cpu_online_map;
#else
	return &cpumask_of_cpu(0);
#endif
}

#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0)    /* phys delivery to target proc */
#define NO_BALANCE_IRQ (0)

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return (0);
}

static inline unsigned long check_apicid_present(int bit)
{
	return (1);
}

static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = xapic_phys_to_log_apicid(cpu);
	val |= SET_APIC_LOGICAL_ID(id);
	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static inline void setup_apic_routing(void)
{
	printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
	       "Physflat", nr_ioapics);
}

static inline int multi_timer_check(int apic, int irq)
{
	return (0);
}

static inline int apicid_to_node(int logical_apicid)
{
	return apicid_2_node[hard_smp_processor_id()];
}

static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}

extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_physical_id(cpu);
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xFFL);
}

static inline void setup_portio_remap(void)
{
}

static inline void enable_apic_mode(void)
{
}

static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return (1);
}

/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int cpu;
	int apicid;

	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	return apicid;
}

static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	if (cpu < nr_cpu_ids)
		return cpu_to_logical_apicid(cpu);

	return BAD_APICID;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

#endif /* __ASM_MACH_APIC_H */
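
The cpu_mask_to_apicid_and() helper above spells out the constraint it works under: fixed IRQ delivery can target only one physical APIC ID, so the first CPU that is in both masks and also online wins. A standalone sketch of that selection, using plain 64-bit words in place of struct cpumask and entirely made-up names, might look like this:

/* Standalone sketch (not kernel code): pick the lowest-numbered CPU
 * present in both masks and in the online mask, or a BAD_APICID-like
 * sentinel if the intersection is empty. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_NR_CPUS 64
#define SKETCH_BAD_ID  0xffffu

static unsigned pick_first_cpu(uint64_t requested, uint64_t allowed, uint64_t online)
{
	uint64_t candidates = requested & allowed & online;
	unsigned cpu;

	for (cpu = 0; cpu < SKETCH_NR_CPUS; cpu++)
		if (candidates & (1ULL << cpu))
			return cpu;          /* lowest-numbered match */
	return SKETCH_BAD_ID;                /* like returning BAD_APICID */
}

int main(void)
{
	/* CPUs 2 and 5 requested, CPUs 4-7 allowed, CPUs 0-5 online -> picks CPU 5. */
	printf("picked CPU %u\n", pick_first_cpu(0x24, 0xf0, 0x3f));
	return 0;
}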

@@ -1,13 +0,0 @@
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H

#define APIC_ID_MASK (0xFF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
	return (((x)>>24)&0xFF);
}

#define GET_APIC_ID(x) get_apic_id(x)

#endif

@@ -1,22 +0,0 @@
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H

void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
	send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
	send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static inline void send_IPI_all(int vector)
{
	send_IPI_mask(cpu_online_mask, vector);
}

#endif /* __ASM_MACH_IPI_H */

@@ -1,26 +1,36 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H

/* Don't touch these, unless you really know what you're doing. */
#define DEF_SYSSEG 0x1000
#define DEF_SYSSIZE 0x7F00

/* Internal svga startup constants */
#define NORMAL_VGA 0xffff      /* 80x25 mode */
#define EXTENDED_VGA 0xfffe    /* 80x50 mode */
#define ASK_VGA 0xfffd         /* ask for it at bootup */

#ifdef __KERNEL__

/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
				+ (CONFIG_PHYSICAL_ALIGN - 1)) \
				& ~(CONFIG_PHYSICAL_ALIGN - 1))

#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#else /* !CONFIG_KERNEL_BZIP2 */

#ifdef CONFIG_X86_64
#define BOOT_HEAP_SIZE 0x7000
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_HEAP_SIZE 0x4000
#endif

#endif /* !CONFIG_KERNEL_BZIP2 */

#ifdef CONFIG_X86_64
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_STACK_SIZE 0x1000
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_BOOT_H */
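
LOAD_PHYSICAL_ADDR above rounds CONFIG_PHYSICAL_START up to the next CONFIG_PHYSICAL_ALIGN boundary with the usual (x + (align - 1)) & ~(align - 1) trick, which requires the alignment to be a power of two. A small standalone sketch with made-up example values shows the arithmetic:

/* Standalone sketch of the rounding done by LOAD_PHYSICAL_ADDR.
 * The EXAMPLE_* values are hypothetical, not a statement about any
 * particular .config. */
#include <stdio.h>

#define EXAMPLE_PHYSICAL_START 0x00123456UL   /* hypothetical */
#define EXAMPLE_PHYSICAL_ALIGN 0x00200000UL   /* hypothetical: 2 MB, power of two */

#define EXAMPLE_LOAD_PHYSICAL_ADDR ((EXAMPLE_PHYSICAL_START \
				+ (EXAMPLE_PHYSICAL_ALIGN - 1)) \
				& ~(EXAMPLE_PHYSICAL_ALIGN - 1))

int main(void)
{
	/* Prints 0x200000: 0x123456 rounded up to the next 2 MB boundary. */
	printf("%#lx\n", (unsigned long)EXAMPLE_LOAD_PHYSICAL_ADDR);
	return 0;
}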

@@ -5,24 +5,43 @@
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
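
The hunk above replaces the do { } while (0) no-op macros with empty static inline functions. Both forms compile away, but the inline versions type-check their arguments and keep function-like semantics. A minimal standalone sketch of the two styles, with hypothetical names, is:

/* Standalone sketch (not the kernel header) of the two no-op styles. */
#include <stdio.h>

struct demo_page { int id; };

#define demo_flush_macro(page)	do { } while (0)

static inline void demo_flush_inline(struct demo_page *page) { }

int main(void)
{
	struct demo_page pg = { 42 };

	demo_flush_macro(&pg);   /* would silently accept demo_flush_macro("junk") too */
	demo_flush_inline(&pg);  /* passing "junk" here draws a type diagnostic */
	printf("page %d flushed (both are no-ops)\n", pg.id);
	return 0;
}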

@@ -1,5 +1,55 @@
/*
 * Some macros to handle stack frames in assembly.

 x86 function call convention, 64-bit:
 -------------------------------------
 arguments            | callee-saved       | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
 arguments          | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/


/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

#define R15 0

@@ -9,7 +59,7 @@
#define RBP 32
#define RBX 40

/* arguments: interrupts/non tracing syscalls only save upto here*/
/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11 48
#define R10 56
#define R9 64

@@ -22,7 +72,7 @@
#define ORIG_RAX 120       /* + error_code */
/* end of arguments */

/* cpu exception frame or undefined in case of fast syscall. */
/* cpu exception frame or undefined in case of fast syscall: */
#define RIP 128
#define CS 136
#define EFLAGS 144
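
The R15..EFLAGS values above are byte offsets of consecutive 8-byte register slots in the 64-bit saved-register frame. The following standalone C11 sketch mirrors the listed layout in a hypothetical struct (it is not the kernel's pt_regs declaration) and checks the arithmetic, assuming an LP64 target where unsigned long is 8 bytes:

/* Standalone sketch: verify that consecutive 8-byte slots reproduce the
 * offsets given in the hunk above. */
#include <stddef.h>
#include <stdio.h>

struct frame_sketch {
	unsigned long r15, r14, r13, r12, rbp, rbx;   /* callee-saved */
	unsigned long r11, r10, r9, r8;               /* arguments */
	unsigned long rax, rcx, rdx, rsi, rdi;
	unsigned long orig_rax;                       /* + error_code */
	unsigned long rip, cs, eflags;                /* exception frame */
};

_Static_assert(offsetof(struct frame_sketch, r15) == 0, "R15");
_Static_assert(offsetof(struct frame_sketch, rbp) == 32, "RBP");
_Static_assert(offsetof(struct frame_sketch, rbx) == 40, "RBX");
_Static_assert(offsetof(struct frame_sketch, r11) == 48, "R11");
_Static_assert(offsetof(struct frame_sketch, orig_rax) == 120, "ORIG_RAX");
_Static_assert(offsetof(struct frame_sketch, rip) == 128, "RIP");
_Static_assert(offsetof(struct frame_sketch, cs) == 136, "CS");
_Static_assert(offsetof(struct frame_sketch, eflags) == 144, "EFLAGS");

int main(void)
{
	printf("frame layout matches the offsets listed in calling.h\n");
	return 0;
}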

@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>

#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0

#endif /* CONFIG_SMP */

struct x86_cpu {
	struct cpu cpu;
};

@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
#endif

DECLARE_PER_CPU(int, cpu_state);

extern unsigned int boot_cpu_id;

#endif /* _ASM_X86_CPU_H */

@@ -0,0 +1,193 @@
#ifndef _ASM_X86_CPU_DEBUG_H
#define _ASM_X86_CPU_DEBUG_H

/*
 * CPU x86 architecture debug
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 */

/* Register flags */
enum cpu_debug_bit {
/* Model Specific Registers (MSRs) */
	CPU_MC_BIT,		/* Machine Check */
	CPU_MONITOR_BIT,	/* Monitor */
	CPU_TIME_BIT,		/* Time */
	CPU_PMC_BIT,		/* Performance Monitor */
	CPU_PLATFORM_BIT,	/* Platform */
	CPU_APIC_BIT,		/* APIC */
	CPU_POWERON_BIT,	/* Power-on */
	CPU_CONTROL_BIT,	/* Control */
	CPU_FEATURES_BIT,	/* Features control */
	CPU_LBRANCH_BIT,	/* Last Branch */
	CPU_BIOS_BIT,		/* BIOS */
	CPU_FREQ_BIT,		/* Frequency */
	CPU_MTTR_BIT,		/* MTRR */
	CPU_PERF_BIT,		/* Performance */
	CPU_CACHE_BIT,		/* Cache */
	CPU_SYSENTER_BIT,	/* Sysenter */
	CPU_THERM_BIT,		/* Thermal */
	CPU_MISC_BIT,		/* Miscellaneous */
	CPU_DEBUG_BIT,		/* Debug */
	CPU_PAT_BIT,		/* PAT */
	CPU_VMX_BIT,		/* VMX */
	CPU_CALL_BIT,		/* System Call */
	CPU_BASE_BIT,		/* BASE Address */
	CPU_SMM_BIT,		/* System mgmt mode */
	CPU_SVM_BIT,		/* Secure Virtual Machine */
	CPU_OSVM_BIT,		/* OS-Visible Workaround */
/* Standard Registers */
	CPU_TSS_BIT,		/* Task Stack Segment */
	CPU_CR_BIT,		/* Control Registers */
	CPU_DT_BIT,		/* Descriptor Table */
/* End of Registers flags */
	CPU_REG_ALL_BIT,	/* Select all Registers */
};

#define CPU_REG_ALL (~0)	/* Select all Registers */

#define CPU_MC (1 << CPU_MC_BIT)
#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
#define CPU_TIME (1 << CPU_TIME_BIT)
#define CPU_PMC (1 << CPU_PMC_BIT)
#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
#define CPU_APIC (1 << CPU_APIC_BIT)
#define CPU_POWERON (1 << CPU_POWERON_BIT)
#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
#define CPU_BIOS (1 << CPU_BIOS_BIT)
#define CPU_FREQ (1 << CPU_FREQ_BIT)
#define CPU_MTRR (1 << CPU_MTTR_BIT)
#define CPU_PERF (1 << CPU_PERF_BIT)
#define CPU_CACHE (1 << CPU_CACHE_BIT)
#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
#define CPU_THERM (1 << CPU_THERM_BIT)
#define CPU_MISC (1 << CPU_MISC_BIT)
#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
#define CPU_PAT (1 << CPU_PAT_BIT)
#define CPU_VMX (1 << CPU_VMX_BIT)
#define CPU_CALL (1 << CPU_CALL_BIT)
#define CPU_BASE (1 << CPU_BASE_BIT)
#define CPU_SMM (1 << CPU_SMM_BIT)
#define CPU_SVM (1 << CPU_SVM_BIT)
#define CPU_OSVM (1 << CPU_OSVM_BIT)
#define CPU_TSS (1 << CPU_TSS_BIT)
#define CPU_CR (1 << CPU_CR_BIT)
#define CPU_DT (1 << CPU_DT_BIT)

/* Register file flags */
enum cpu_file_bit {
	CPU_INDEX_BIT,		/* index */
	CPU_VALUE_BIT,		/* value */
};

#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)

/*
 * DisplayFamily_DisplayModel	Processor Families/Processor Number Series
 * --------------------------	------------------------------------------
 * 05_01, 05_02, 05_04		Pentium, Pentium with MMX
 *
 * 06_01			Pentium Pro
 * 06_03, 06_05			Pentium II Xeon, Pentium II
 * 06_07, 06_08, 06_0A, 06_0B	Pentium III Xeon, Pentium III
 *
 * 06_09, 06_0D			Pentium M
 *
 * 06_0E			Core Duo, Core Solo
 *
 * 06_0F			Xeon 3000, 3200, 5100, 5300, 7300 series,
 *				Core 2 Quad, Core 2 Extreme, Core 2 Duo,
 *				Pentium dual-core
 * 06_17			Xeon 5200, 5400 series, Core 2 Quad Q9650
 *
 * 06_1C			Atom
 *
 * 0F_00, 0F_01, 0F_02		Xeon, Xeon MP, Pentium 4
 * 0F_03, 0F_04			Xeon, Xeon MP, Pentium 4, Pentium D
 *
 * 0F_06			Xeon 7100, 5000 Series, Xeon MP,
 *				Pentium 4, Pentium D
 */

/* Register processors bits */
enum cpu_processor_bit {
	CPU_NONE,
/* Intel */
	CPU_INTEL_PENTIUM_BIT,
	CPU_INTEL_P6_BIT,
	CPU_INTEL_PENTIUM_M_BIT,
	CPU_INTEL_CORE_BIT,
	CPU_INTEL_CORE2_BIT,
	CPU_INTEL_ATOM_BIT,
	CPU_INTEL_XEON_P4_BIT,
	CPU_INTEL_XEON_MP_BIT,
};

#define CPU_ALL (~0)		/* Select all CPUs */

#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT)
#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT)
#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT)
#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT)
#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT)
#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT)
#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT)
#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT)

#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2)
#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM)
#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM)
#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON)
#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON)
#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON)
#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON)
#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT)
#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX)
#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE)
#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT)
#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE)

/* Select all Intel CPUs */
#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)

#define MAX_CPU_FILES 512

struct cpu_private {
	unsigned cpu;
	unsigned type;
	unsigned reg;
	unsigned file;
};

struct cpu_debug_base {
	char *name;		/* Register name */
	unsigned flag;		/* Register flag */
};

struct cpu_cpuX_base {
	struct dentry *dentry;	/* Register dentry */
	int init;		/* Register index file */
};

struct cpu_file_base {
	char *name;		/* Register file name */
	unsigned flag;		/* Register file flag */
};

struct cpu_debug_range {
	unsigned min;		/* Register range min */
	unsigned max;		/* Register range max */
	unsigned flag;		/* Supported flags */
	unsigned model;		/* Supported models */
};

#endif /* _ASM_X86_CPU_DEBUG_H */
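
The single-bit flags and the composite masks above are meant to be tested against the flag field of a cpu_debug_range entry. A standalone sketch of that kind of test, reproducing only a tiny hypothetical subset of the bits and using made-up register ranges, might look like this:

/* Standalone sketch (not kernel code): a range table entry is dumped when
 * one of its flag bits is in the caller's selection mask. */
#include <stdio.h>

enum { SK_APIC_BIT, SK_MTRR_BIT, SK_PAT_BIT };

#define SK_APIC (1 << SK_APIC_BIT)
#define SK_MTRR (1 << SK_MTRR_BIT)
#define SK_PAT  (1 << SK_PAT_BIT)

struct sk_debug_range {
	unsigned min, max;	/* made-up register range */
	unsigned flag;		/* which register groups this range belongs to */
};

static const struct sk_debug_range sk_ranges[] = {
	{ 0x100, 0x10f, SK_MTRR | SK_PAT },
	{ 0x200, 0x23f, SK_APIC },
};

static void sk_dump(unsigned selected)
{
	for (unsigned i = 0; i < sizeof(sk_ranges) / sizeof(sk_ranges[0]); i++)
		if (sk_ranges[i].flag & selected)
			printf("would dump registers %#x..%#x\n",
			       sk_ranges[i].min, sk_ranges[i].max);
}

int main(void)
{
	sk_dump(SK_MTRR);	/* selects only the first range */
	return 0;
}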

@@ -0,0 +1,32 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)

static inline void setup_cpu_local_masks(void) { }

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
Some files were not shown because too many files changed in this diff.