Merge branch 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (682 commits)
  percpu: fix spurious alignment WARN in legacy SMP percpu allocator
  percpu: generalize embedding first chunk setup helper
  percpu: more flexibility for @dyn_size of pcpu_setup_first_chunk()
  percpu: make x86 addr <-> pcpu ptr conversion macros generic
  linker script: define __per_cpu_load on all SMP capable archs
  x86: UV: remove uv_flush_tlb_others() WARN_ON
  percpu: finer grained locking to break deadlock and allow atomic free
  percpu: move fully free chunk reclamation into a work
  percpu: move chunk area map extension out of area allocation
  percpu: replace pcpu_realloc() with pcpu_mem_alloc() and pcpu_mem_free()
  x86, percpu: setup reserved percpu area for x86_64
  percpu, module: implement reserved allocation and use it for module percpu variables
  percpu: add an indirection ptr for chunk page map access
  x86: make embedding percpu allocator return excessive free space
  percpu: use negative for auto for pcpu_setup_first_chunk() arguments
  percpu: improve first chunk initial area map handling
  percpu: cosmetic renames in pcpu_setup_first_chunk()
  percpu: clean up percpu constants
  x86: un-__init fill_pud/pmd/pte
  x86: remove vestigial fix_ioremap prototypes
  ...

Manually merge conflicts in arch/ia64/kernel/irq_ia64.c
This commit is contained in:
Commit 7c730ccdc1
@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)

 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.

 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
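To make the fallback mechanism described above concrete, here is a minimal sketch of the pattern; the exact default expansions are an assumption for illustration, not a quote of include/linux/topology.h:

/* Sketch: include/linux/topology.h supplies a default for any topology
 * macro the architecture did not define, so generic code can use the
 * macros unconditionally.  Default values here are assumed. */
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	(-1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			(0)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif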
@@ -1325,8 +1325,13 @@ and is between 256 and 4096 characters. It is defined in the file

 	memtest=	[KNL,X86] Enable memtest
 			Format: <integer>
-			range: 0,4 : pattern number
-			default : 0 <disable>
+			Specifies the number of memtest passes to be
+			performed. Each pass selects another test
+			pattern from a given set of patterns. Memtest
+			fills the memory with this pattern, validates
+			memory contents and reserves bad memory
+			regions that are detected.

 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
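A brief usage note on the new semantics: booting with, for example, memtest=4 requests four passes, each selecting the next pattern from the set, while the previous default of 0 left memtest disabled.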
@@ -158,7 +158,7 @@ Offset	Proto	Name		Meaning
 0202/4	2.00+	header		Magic signature "HdrS"
 0206/2	2.00+	version		Boot protocol version supported
 0208/4	2.00+	realmode_swtch	Boot loader hook (see below)
-020C/2	2.00+	start_sys	The load-low segment (0x1000) (obsolete)
+020C/2	2.00+	start_sys_seg	The load-low segment (0x1000) (obsolete)
 020E/2	2.00+	kernel_version	Pointer to kernel version string
 0210/1	2.00+	type_of_loader	Boot loader identifier
 0211/1	2.00+	loadflags	Boot protocol option flags
@@ -170,10 +170,11 @@ Offset	Proto	Name		Meaning
 0224/2	2.01+	heap_end_ptr	Free memory after setup end
 0226/2	N/A	pad1		Unused
 0228/4	2.02+	cmd_line_ptr	32-bit pointer to the kernel command line
-022C/4	2.03+	initrd_addr_max	Highest legal initrd address
+022C/4	2.03+	ramdisk_max	Highest legal initrd address
 0230/4	2.05+	kernel_alignment Physical addr alignment required for kernel
 0234/1	2.05+	relocatable_kernel Whether kernel is relocatable or not
-0235/3	N/A	pad2		Unused
+0235/1	N/A	pad2		Unused
+0236/2	N/A	pad3		Unused
 0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
 023C/4	2.07+	hardware_subarch Hardware subarchitecture
 0240/8	2.07+	hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol:	2.00+
   e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
   10.17.

-Field name:	readmode_swtch
+Field name:	realmode_swtch
 Type:		modify (optional)
 Offset/size:	0x208/4
 Protocol:	2.00+

   Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)

-Field name:	start_sys
+Field name:	start_sys_seg
 Type:		read
 Offset/size:	0x20c/2
 Protocol:	2.00+
@@ -468,7 +469,7 @@ Protocol:	2.02+
   zero, the kernel will assume that your boot loader does not support
   the 2.02+ protocol.

-Field name:	initrd_addr_max
+Field name:	ramdisk_max
 Type:		read
 Offset/size:	0x22c/4
 Protocol:	2.03+
@@ -542,7 +543,10 @@ Protocol:	2.08+

   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
-  numbers.  Currently only gzip compressed ELF is used.
+  numbers.  The currently supported compression formats are gzip
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
+  (magic number 5D 00). The uncompressed payload is currently always ELF
+  (magic number 7F 45 4C 46).

 Field name:	payload_length
 Type:		read
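As a concrete illustration of the magic-number check the new text describes, here is a minimal sketch (a hypothetical helper for illustration, not boot-loader or kernel code), using exactly the magic bytes listed above:

#include <stddef.h>

enum payload_fmt { FMT_UNKNOWN, FMT_GZIP, FMT_BZIP2, FMT_LZMA, FMT_ELF };

/* Identify a payload by the magic numbers documented in the boot
 * protocol text above. */
static enum payload_fmt payload_format(const unsigned char *p, size_t len)
{
	if (len < 4)
		return FMT_UNKNOWN;
	if (p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
		return FMT_GZIP;			/* gzip */
	if (p[0] == 0x42 && p[1] == 0x5a)
		return FMT_BZIP2;			/* bzip2 */
	if (p[0] == 0x5d && p[1] == 0x00)
		return FMT_LZMA;			/* LZMA */
	if (p[0] == 0x7f && p[1] == 'E' && p[2] == 'L' && p[3] == 'F')
		return FMT_ELF;				/* uncompressed ELF */
	return FMT_UNKNOWN;
}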
Makefile (3 lines changed)
@@ -533,8 +533,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif

 # Force gcc to behave correct even for buggy distributions
+# Arch Makefiles may override this setting
 ifndef CONFIG_CC_STACKPROTECTOR
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
 endif

 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;

-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
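This one-line change is representative of a conversion repeated across many files in this merge: the affinity mask is now accessed through a struct cpumask pointer rather than stored as a plain cpumask_t value, so struct assignment gives way to cpumask_copy(). A simplified sketch of why (types reduced for illustration; the kernel's real definitions differ):

#include <string.h>

struct cpumask { unsigned long bits[4]; };	/* fixed-width stand-in */

struct irq_desc_old { struct cpumask affinity; };	/* embedded mask   */
struct irq_desc_new { struct cpumask *affinity; };	/* referenced mask */

/* With an embedded mask, `desc.affinity = *mask;` copies the full
 * NR_CPUS-sized structure.  With a pointer (which may reference an
 * off-stack mask sized to nr_cpu_ids), plain assignment would only
 * move the pointer, so the bits must be copied explicitly: */
static void copy_affinity(struct irq_desc_new *desc,
			  const struct cpumask *mask)
{
	memcpy(desc->affinity, mask, sizeof(*mask));	/* cpumask_copy() */
}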
@@ -189,9 +189,21 @@ callback_init(void * kernel_end)

 	if (alpha_using_srm) {
 		static struct vm_struct console_remap_vm;
-		unsigned long vaddr = VMALLOC_START;
+		unsigned long nr_pages = 0;
+		unsigned long vaddr;
 		unsigned long i, j;

+		/* calculate needed size */
+		for (i = 0; i < crb->map_entries; ++i)
+			nr_pages += crb->map[i].count;
+
+		/* register the vm area */
+		console_remap_vm.flags = VM_ALLOC;
+		console_remap_vm.size = nr_pages << PAGE_SHIFT;
+		vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+		vaddr = (unsigned long)console_remap_vm.addr;
+
 		/* Set up the third level PTEs and update the virtual
 		   addresses of the CRB entries. */
 		for (i = 0; i < crb->map_entries; ++i) {
@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
 			vaddr += PAGE_SIZE;
 		}
 	}

-	/* Let vmalloc know that we've allocated some space. */
-	console_remap_vm.flags = VM_ALLOC;
-	console_remap_vm.addr = (void *) VMALLOC_START;
-	console_remap_vm.size = vaddr - VMALLOC_START;
-	vmlist = &console_remap_vm;
-
 	callback_init_done = 1;
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;

 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);

-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}

 			route_irq(desc, i, newcpu);
@@ -64,7 +64,9 @@ SECTIONS
 	__initramfs_end = .;
 #endif
 	. = ALIGN(4096);
+	__per_cpu_load = .;
 	__per_cpu_start = .;
+	*(.data.percpu.page_aligned)
 	*(.data.percpu)
 	*(.data.percpu.shared_aligned)
 	__per_cpu_end = .;
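The __per_cpu_load symbol introduced here marks where the bootstrap per-cpu image was loaded, while __per_cpu_start/__per_cpu_end bound the addresses per-cpu variables are linked at. A minimal sketch of how such symbols are typically consumed (illustrative only, not the kernel's actual setup code):

#include <string.h>

/* Symbols exported by the linker script above; their values come from
 * the link, not from C code. */
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];

/* Sketch: at boot, each CPU's per-cpu area starts out as a copy of the
 * static template the linker placed at __per_cpu_load. */
static void copy_percpu_template(void *cpu_area)
{
	memcpy(cpu_area, __per_cpu_load,
	       (size_t)(__per_cpu_end - __per_cpu_start));
}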
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);

 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y

-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool n

 config ARCH_HAVE_MEMORY_PRESENT
@@ -3,14 +3,4 @@

 #include <asm-generic/percpu.h>

-#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
-#else
-#define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #endif	/* __ARCH_BLACKFIN_PERCPU__ */
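These arch-local definitions could be deleted because generic fallbacks now exist. A sketch of the assumed shape of those fallbacks, modeled on the very lines removed above (the exact generic definitions live in include/linux/percpu.h and may differ):

/* Assumed generic fallback that makes the Blackfin copy redundant. */
#ifndef PERCPU_MODULE_RESERVE
# ifdef CONFIG_MODULES
#  define PERCPU_MODULE_RESERVE	8192
# else
#  define PERCPU_MODULE_RESERVE	0
# endif
#endif

#ifndef PERCPU_ENOUGH_ROOM
# define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif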
@@ -70,6 +70,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);

 #else /* ! SMP */

-#define PER_CPU_ATTRIBUTES	__attribute__((__section__(".data.percpu")))
-
 #define per_cpu_init()				(__phys_per_cpu_start)

 #endif	/* SMP */

+#define PER_CPU_BASE_SECTION ".data.percpu"
+
 /*
  * Be extremely careful when taking the address of this variable! Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
 	.child			= NULL,			\
 	.groups			= NULL,			\
 	.min_interval		= 8,			\
-	.max_interval		= 8*(min(num_online_cpus(), 32)),	\
+	.max_interval		= 8*(min(num_online_cpus(), 32U)),	\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 2,			\
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+	/* temporary support for running on hardware simulator */
+	return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif	/* _ASM_IA64_UV_UV_H */
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 	return __va(phys_addr);
 }

+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                              Boot-time Table Parsing
    -------------------------------------------------------------------------- */
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;

-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
@@ -493,16 +493,15 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		struct irq_desc *desc;
 		int irq = local_vector_to_irq(vector);
+		struct irq_desc *desc = irq_to_desc(irq);

-		desc = irq_desc + irq;
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
-		} else if (unlikely(IS_RESCHEDULE(vector)))
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			kstat_incr_irqs_this_cpu(irq, desc);
-		else {
+		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -553,16 +552,15 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		struct irq_desc *desc;
 		int irq = local_vector_to_irq(vector);
-		desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);

 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
-		} else if (unlikely(IS_RESCHEDULE(vector)))
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			kstat_incr_irqs_this_cpu(irq, desc);
-		else {
+		} else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);

 			ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -38,7 +38,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
@@ -150,7 +150,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
@@ -213,16 +213,9 @@ SECTIONS
 	{ *(.data.cacheline_aligned) }

   /* Per-cpu data: */
-  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-  __phys_per_cpu_start = .;
-  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-	{
-		__per_cpu_start = .;
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+  PERCPU_VADDR(PERCPU_ADDR, :percpu)
+  __phys_per_cpu_start = __per_cpu_load;
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
   						 * into percpu page size
   						 */
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 	smtc_forward_irq(irq);						\
 	irq_exit();							\
 	return;								\
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

 	}
-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);

 }
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */

-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);

 	/*
 	 * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
+	int irq = MIPS_CPU_IRQ_BASE + 1;

 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
-		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 		irq_exit();
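This hunk, and several that follow, convert kstat_this_cpu.irqs[irq]++ to kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)). A simplified sketch of the underlying shift (stand-in types for illustration; not the kernel's real declarations): with sparse IRQ support, per-IRQ interrupt counters move out of a fixed global array and into each IRQ's descriptor, so callers now fetch the descriptor and bump its counter.

/* Sketch: counters live in the descriptor rather than in a global
 * kstat array indexed by a fixed NR_IRQS. */
struct irq_desc_sketch {
	unsigned int *kstat_irqs;	/* one counter slot per CPU */
};

static inline void kstat_incr_sketch(struct irq_desc_sketch *desc,
				     unsigned int cpu)
{
	desc->kstat_irqs[cpu]++;	/* replaces kstat_this_cpu.irqs[irq]++ */
}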
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {

 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */

+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);

 	if (cpus_empty(tmask))
 		/*
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
 	int irq = SGI_BUSERR_IRQ;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	ip22_be_interrupt(irq);
 	irq_exit();
 }
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
 	char c;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
 	ArcRead(0, &c, 1, &cnt);
 	ArcEnterInteractiveMode();
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_BCM1480_INT_MBOX_0_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_INT_MBOX_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	 * the stack NMI-atomically, it's safe to use smp_processor_id().
 	 */
 	int sum, cpu = smp_processor_id();
+	int irq = NMIIRQ;
 	u8 wdt, tmp;

 	wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	NMICR = NMICR_WDIF;

 	nmi_count(cpu)++;
-	kstat_this_cpu.irqs[NMIIRQ]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	sum = irq_stat[cpu].__irq_count;

 	if (last_irq_sums[cpu] == sum) {
@@ -138,7 +138,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_dest < 0)
 		return;

-	cpumask_copy(&irq_desc[irq].affinity, &cpumask_of_cpu(cpu_dest));
+	cpumask_copy(&irq_desc[irq].affinity, dest);
 }
 #endif
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;

-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
@@ -181,13 +181,7 @@ SECTIONS
 		__initramfs_end = .;
 	}
 #endif
-	. = ALIGN(PAGE_SIZE);
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-		__per_cpu_start = .;
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+	PERCPU(PAGE_SIZE)

 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;

+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 			   virq, cpu);

 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -252,9 +252,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
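The irq_rover visible in the two irq_choose_cpu() fragments above implements a simple round-robin: when an IRQ's affinity permits all CPUs, successive IRQs are spread across online CPUs in turn. A minimal sketch of the idea (locking omitted; the kernel guards its rover with irq_rover_lock, and the real code walks the online mask rather than assuming contiguous CPU ids):

/* Sketch: hand out CPUs round-robin for IRQs allowed on all CPUs. */
static unsigned int irq_rover;

static unsigned int choose_cpu_sketch(unsigned int nr_online_cpus)
{
	unsigned int cpu = irq_rover;

	irq_rover = (irq_rover + 1) % nr_online_cpus;
	return cpu;
}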
@@ -805,7 +806,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					&irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
@@ -36,10 +36,10 @@
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/irq.h>

 #include <asm/oplib.h>
 #include <asm/timer.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/starfire.h>
@@ -724,14 +724,12 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 	unsigned long tick_mask = tick_ops->softint_mask;
 	int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
-	struct irq_desc *desc;

 	clear_softint(tick_mask);

 	irq_enter();

-	desc = irq_to_desc(0);
-	kstat_incr_irqs_this_cpu(0, desc);
+	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));

 	if (unlikely(!evt->event_handler)) {
 		printk(KERN_WARNING
arch/x86/Kconfig (651 lines changed)
The diff for this file is not shown because of its large size.
@@ -50,7 +50,7 @@ config M386
 config M486
 	bool "486"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a 486 series processor, either Intel or one of the
 	  compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
 	  DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or

@@ -59,7 +59,7 @@ config M486
 config M586
 	bool "586/K5/5x86/6x86/6x86MX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an 586 or 686 series processor such as the AMD K5,
 	  the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
 	  assume the RDTSC (Read Time Stamp Counter) instruction.

@@ -67,21 +67,21 @@ config M586
 config M586TSC
 	bool "Pentium-Classic"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium Classic processor with the RDTSC (Read
 	  Time Stamp Counter) instruction for benchmarking.

 config M586MMX
 	bool "Pentium-MMX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Pentium with the MMX graphics/multimedia
 	  extended instructions.

 config M686
 	bool "Pentium-Pro"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium Pro chips.  This enables the use of
 	  Pentium Pro extended instructions, and disables the init-time guard
 	  against the f00f bug found in earlier Pentiums.

@@ -89,7 +89,7 @@ config M686
 config MPENTIUMII
 	bool "Pentium-II/Celeron(pre-Coppermine)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-II and
 	  pre-Coppermine Celeron core.  This option enables an unaligned
 	  copy optimization, compiles the kernel with optimization flags

@@ -99,7 +99,7 @@ config MPENTIUMII
 config MPENTIUMIII
 	bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel chips based on the Pentium-III and
 	  Celeron-Coppermine core.  This option enables use of some
 	  extended prefetch instructions in addition to the Pentium II

@@ -108,14 +108,14 @@ config MPENTIUMIII
 config MPENTIUMM
 	bool "Pentium M"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium M (not Pentium-4 M)
 	  notebook chips.

 config MPENTIUM4
 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for Intel Pentium 4 chips.  This includes the
 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
 	  Pentium-4 M (not Pentium M) chips.  This option enables compile

@@ -151,7 +151,7 @@ config MPENTIUM4
 config MK6
 	bool "K6/K6-II/K6-III"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD K6-family processor.  Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.

@@ -159,14 +159,14 @@ config MK6
 config MK7
 	bool "Athlon/Duron/K7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an AMD Athlon K7-family processor.  Enables use of
 	  some extended instructions, and passes appropriate optimization
 	  flags to GCC.

 config MK8
 	bool "Opteron/Athlon64/Hammer/K8"
-	help
+	---help---
 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
 	  Enables use of some extended instructions, and passes appropriate
 	  optimization flags to GCC.

@@ -174,7 +174,7 @@ config MK8
 config MCRUSOE
 	bool "Crusoe"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Crusoe processor.  Treats the processor
 	  like a 586 with TSC, and sets some GCC optimization flags (like a
 	  Pentium Pro with no alignment requirements).

@@ -182,13 +182,13 @@ config MCRUSOE
 config MEFFICEON
 	bool "Efficeon"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Transmeta Efficeon processor.

 config MWINCHIPC6
 	bool "Winchip-C6"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip C6 chip.  Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements.

@@ -196,7 +196,7 @@ config MWINCHIPC6
 config MWINCHIP3D
 	bool "Winchip-2/Winchip-2A/Winchip-3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for an IDT Winchip-2, 2A or 3.  Linux and GCC
 	  treat this chip as a 586TSC with some extended instructions
 	  and alignment requirements.  Also enable out of order memory

@@ -206,19 +206,19 @@ config MWINCHIP3D
 config MGEODEGX1
 	bool "GeodeGX1"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Geode GX1 (Cyrix MediaGX) chip.

 config MGEODE_LX
 	bool "Geode GX/LX"
 	depends on X86_32
-	help
+	---help---
 	  Select this for AMD Geode GX and LX processors.

 config MCYRIXIII
 	bool "CyrixIII/VIA-C3"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
 	  treat this chip as a generic 586.  Whilst the CPU is 686 class,
 	  it lacks the cmov extension which gcc assumes is present when

@@ -230,7 +230,7 @@ config MCYRIXIII
 config MVIAC3_2
 	bool "VIA C3-2 (Nehemiah)"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C3 "Nehemiah". Selecting this enables usage
 	  of SSE and tells gcc to treat the CPU as a 686.
 	  Note, this kernel will not boot on older (pre model 9) C3s.

@@ -238,14 +238,14 @@ config MVIAC3_2
 config MVIAC7
 	bool "VIA C7"
 	depends on X86_32
-	help
+	---help---
 	  Select this for a VIA C7.  Selecting this uses the correct cache
 	  shift and tells gcc to treat the CPU as a 686.

 config MPSC
 	bool "Intel P4 / older Netburst based Xeon"
 	depends on X86_64
-	help
+	---help---
 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
 	  Xeon CPUs with Intel 64bit which is compatible with x86-64.
 	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the

@@ -255,7 +255,7 @@ config MPSC
 config MCORE2
 	bool "Core 2/newer Xeon"
-	help
+	---help---

 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 	  53xx) CPUs. You can distinguish newer from older Xeons by the CPU

@@ -265,7 +265,7 @@ config MCORE2
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
-	help
+	---help---
 	  Generic x86-64 CPU.
 	  Run equally well on all x86-64 CPUs.

@@ -274,7 +274,7 @@ endchoice
 config X86_GENERIC
 	bool "Generic x86 support"
 	depends on X86_32
-	help
+	---help---
 	  Instead of just including optimizations for the selected
 	  x86 variant (e.g. PII, Crusoe or Athlon), include some more
 	  generic optimizations as well. This will make the kernel

@@ -294,25 +294,23 @@ config X86_CPU
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
 	int
-	default "128" if GENERIC_CPU || MPSC
-	default "64" if MK8 || MCORE2
-	depends on X86_64
+	default "128" if MPSC
+	default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32

 config X86_INTERNODE_CACHE_BYTES
 	int
 	default "4096" if X86_VSMP
 	default X86_L1_CACHE_BYTES if !X86_VSMP
-	depends on X86_64

 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)

 config X86_L1_CACHE_SHIFT
 	int
-	default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
+	default "7" if MPENTIUM4 || MPSC
 	default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU

 config X86_XADD
 	def_bool y

@@ -321,7 +319,7 @@ config X86_XADD
 config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-	help
+	---help---
 	  Old PentiumPro multiprocessor systems had errata that could cause
 	  memory operations to violate the x86 ordering standard in rare cases.
 	  Enabling this option will attempt to work around some (but not all)

@@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR

 menuconfig PROCESSOR_SELECT
 	bool "Supported processor vendors" if EMBEDDED
-	help
+	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.

 config CPU_SUP_INTEL
 	default y
 	bool "Support Intel processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Intel processors

 	  You need this enabled if you want your kernel to run on an

@@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
 	default y
 	bool "Support Cyrix processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Cyrix processors

 	  You need this enabled if you want your kernel to run on a

@@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
 config CPU_SUP_AMD
 	default y
 	bool "Support AMD processors" if PROCESSOR_SELECT
-	help
+	---help---
 	  This enables detection, tunings and quirks for AMD processors

 	  You need this enabled if you want your kernel to run on an

@@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors

 	  You need this enabled if you want your kernel to run on a

@@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
 	depends on 64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Centaur processors

 	  You need this enabled if you want your kernel to run on a

@@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
 	default y
 	bool "Support Transmeta processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for Transmeta processors

 	  You need this enabled if you want your kernel to run on a

@@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
 	default y
 	bool "Support UMC processors" if PROCESSOR_SELECT
 	depends on !64BIT
-	help
+	---help---
 	  This enables detection, tunings and quirks for UMC processors

 	  You need this enabled if you want your kernel to run on a

@@ -523,7 +521,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
-	help
+	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.

 	  Debuggers may use it to collect an execution trace of the debugged
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"

 config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
-	help
+	---help---
 	  If this option is disabled, you allow userspace (root) access to all
 	  of memory, including kernel and userspace memory. Accidental
 	  access to this is obviously disastrous, but specific access can

@@ -25,7 +25,7 @@ config STRICT_DEVMEM
 config X86_VERBOSE_BOOTUP
 	bool "Enable verbose x86 bootup info messages"
 	default y
-	help
+	---help---
 	  Enables the informational output from the decompression stage
 	  (e.g. bzImage) of the boot. If you disable this you will still
 	  see errors. Disable this if you want silent bootup.

@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
 	default y
-	help
+	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
 	  port.

@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
 	bool "Early printk via EHCI debug port"
 	default n
 	depends on EARLY_PRINTK && PCI
-	help
+	---help---
 	  Write kernel log output directly into the EHCI debug port.

 	  This is useful for kernel debugging when your machine crashes very

@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  This option will cause messages to be printed if free stack space
 	  drops below a certain limit.

 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.

@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
 	  of memory corruptions.

@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
 config DEBUG_PER_CPU_MAPS
 	bool "Debug access to per_cpu maps"
 	depends on DEBUG_KERNEL
-	depends on X86_SMP
+	depends on SMP
 	default n
-	help
+	---help---
 	  Say Y to verify that the per_cpu map being accessed has
 	  been setup. Adds a fair amount of code to kernel memory
 	  and decreases performance.

@@ -96,7 +96,7 @@ config X86_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
 	select DEBUG_FS
-	help
+	---help---
 	  Say Y here if you want to show the kernel pagetable layout in a
 	  debugfs file. This information is only useful for kernel developers
 	  who are working in architecture specific areas of the kernel.

@@ -108,7 +108,7 @@ config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Mark the kernel read-only data as write-protected in the pagetables,
 	  in order to catch accidental (and incorrect) writes to such const
 	  data. This is recommended so that we can catch kernel bugs sooner.

@@ -117,7 +117,8 @@ config DEBUG_RODATA
 config DEBUG_RODATA_TEST
 	bool "Testcase for the DEBUG_RODATA feature"
 	depends on DEBUG_RODATA
-	help
+	default y
+	---help---
 	  This option enables a testcase for the DEBUG_RODATA
 	  feature as well as for the change_page_attr() infrastructure.
 	  If in doubt, say "N"

@@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
 config DEBUG_NX_TEST
 	tristate "Testcase for the NX non-executable stack feature"
 	depends on DEBUG_KERNEL && m
-	help
+	---help---
 	  This option enables a testcase for the CPU NX capability
 	  and the software setup of this feature.
 	  If in doubt, say "N"

@@ -133,7 +134,7 @@ config DEBUG_NX_TEST
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on X86_32
-	help
+	---help---
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
 	  running more threads on a system and also reduces the pressure

@@ -144,7 +145,7 @@ config DOUBLEFAULT
 	default y
 	bool "Enable doublefault exception handler" if EMBEDDED
 	depends on X86_32
-	help
+	---help---
 	  This option allows trapping of rare doublefault exceptions that
 	  would otherwise cause a system to silently reboot. Disabling this
 	  option saves about 4k and might cause you much additional grey

@@ -154,7 +155,7 @@ config IOMMU_DEBUG
 	bool "Enable IOMMU debugging"
 	depends on GART_IOMMU && DEBUG_KERNEL
 	depends on X86_64
-	help
+	---help---
 	  Force the IOMMU to on even when you have less than 4GB of
 	  memory and add debugging code. On overflow always panic. And
 	  allow to enable IOMMU leak tracing. Can be disabled at boot

@@ -170,7 +171,7 @@ config IOMMU_LEAK
 	bool "IOMMU leak tracing"
 	depends on DEBUG_KERNEL
 	depends on IOMMU_DEBUG
-	help
+	---help---
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.

@@ -203,25 +204,25 @@ choice

 config IO_DELAY_0X80
 	bool "port 0x80 based port-IO delay [recommended]"
-	help
+	---help---
 	  This is the traditional Linux IO delay used for in/out_p.
 	  It is the most tested hence safest selection here.

 config IO_DELAY_0XED
 	bool "port 0xed based port-IO delay"
-	help
+	---help---
 	  Use port 0xed as the IO delay. This frees up port 0x80 which is
 	  often used as a hardware-debug port.

 config IO_DELAY_UDELAY
 	bool "udelay based port-IO delay"
-	help
+	---help---
 	  Use udelay(2) as the IO delay method. This provides the delay
 	  while not having any side-effect on the IO port space.

 config IO_DELAY_NONE
 	bool "no port-IO delay"
-	help
+	---help---
 	  No port-IO delay. Will break on old boxes that require port-IO
 	  delay for certain operations. Should work on most new machines.

@@ -255,18 +256,18 @@ config DEBUG_BOOT_PARAMS
 	bool "Debug boot parameters"
 	depends on DEBUG_KERNEL
 	depends on DEBUG_FS
-	help
+	---help---
 	  This option will cause struct boot_params to be exported via debugfs.

 config CPA_DEBUG
 	bool "CPA self-test code"
 	depends on DEBUG_KERNEL
-	help
+	---help---
 	  Do change_page_attr() self-tests every 30 seconds.

 config OPTIMIZE_INLINING
 	bool "Allow gcc to uninline functions marked 'inline'"
-	help
+	---help---
 	  This option determines if the kernel forces gcc to inline the functions
 	  developers have marked 'inline'. Doing so takes away freedom from gcc to
 	  do what it thinks is best, which is desirable for the gcc 3.x series of

@@ -279,4 +280,3 @@ config OPTIMIZE_INLINING
 	  If unsure, say N.

 endmenu
-
@@ -70,14 +70,17 @@ else
         # this works around some issues with generating unwind tables in older gccs
         # newer gccs do it by default
         KBUILD_CFLAGS += -maccumulate-outgoing-args
 endif

-stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
-stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-	"$(CC)" -fstack-protector )
-stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
-	"$(CC)" -fstack-protector-all )
-
-KBUILD_CFLAGS += $(stackp-y)
+ifdef CONFIG_CC_STACKPROTECTOR
+	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+		stackp-y := -fstack-protector
+		stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
+		KBUILD_CFLAGS += $(stackp-y)
+	else
+		$(warning stack protector enabled but no compiler support)
+	endif
+endif

 # Stackpointer is addressed different for 32 bit and 64 bit x86
@@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # prevent gcc from generating any FP code by mistake
 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)

-###
-# Sub architecture support
-# fcore-y is linked before mcore-y files.
-
-# Default subarch .c files
-mcore-y  := arch/x86/mach-default/
-
-# Voyager subarch support
-mflags-$(CONFIG_X86_VOYAGER)	:= -Iarch/x86/include/asm/mach-voyager
-mcore-$(CONFIG_X86_VOYAGER)	:= arch/x86/mach-voyager/
-
-# generic subarchitecture
-mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
-fcore-$(CONFIG_X86_GENERICARCH)	+= arch/x86/mach-generic/
-mcore-$(CONFIG_X86_GENERICARCH)	:= arch/x86/mach-default/
-
-# default subarch .h files
-mflags-y += -Iarch/x86/include/asm/mach-default
-
-# 64 bit does not support subarch support - clear sub arch variables
-fcore-$(CONFIG_X86_64)  :=
-mcore-$(CONFIG_X86_64)  :=
-
-KBUILD_CFLAGS += $(mflags-y)
-KBUILD_AFLAGS += $(mflags-y)
-
@@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
 core-y += arch/x86/kernel/
 core-y += arch/x86/mm/

-# Remaining sub architecture files
-core-y += $(mcore-y)
-
 core-y += arch/x86/crypto/
 core-y += arch/x86/vdso/
 core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
@@ -32,7 +32,6 @@ setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
 setup-y		+= header.o main.o mca.o memory.o pm.o pmjump.o
 setup-y		+= printf.o string.o tty.o video.o video-mode.o version.o
 setup-$(CONFIG_X86_APM_BOOT) += apm.o
-setup-$(CONFIG_X86_VOYAGER) += voyager.o

 # The link order of the video-*.o modules can matter.  In particular,
 # video-vga.o *must* be listed first, followed by video-vesa.o.
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ * Copyright 2009 Intel Corporation
  *
  * This file is part of the Linux kernel, and is made available under
  * the terms of the GNU General Public License version 2.
@@ -15,16 +16,23 @@
 #include "boot.h"

 #define MAX_8042_LOOPS	100000
+#define MAX_8042_FF	32

 static int empty_8042(void)
 {
 	u8 status;
 	int loops = MAX_8042_LOOPS;
+	int ffs = MAX_8042_FF;

 	while (loops--) {
 		io_delay();

 		status = inb(0x64);
+		if (status == 0xff) {
+			/* FF is a plausible, but very unlikely status */
+			if (!--ffs)
+				return -1; /* Assume no KBC present */
+		}
 		if (status & 1) {
 			/* Read and discard input data */
 			io_delay();
@@ -118,44 +126,37 @@ static void enable_a20_fast(void)

 int enable_a20(void)
 {
-#if defined(CONFIG_X86_ELAN)
-	/* Elan croaks if we try to touch the KBC */
-	enable_a20_fast();
-	while (!a20_test_long())
-		;
-	return 0;
-#elif defined(CONFIG_X86_VOYAGER)
-	/* On Voyager, a20_test() is unsafe? */
-	enable_a20_kbc();
-	return 0;
-#else
 	int loops = A20_ENABLE_LOOPS;
+	int kbc_err;
+
 	while (loops--) {
 		/* First, check to see if A20 is already enabled
 		   (legacy free, etc.) */
 		if (a20_test_short())
 			return 0;

 		/* Next, try the BIOS (INT 0x15, AX=0x2401) */
 		enable_a20_bios();
 		if (a20_test_short())
 			return 0;

 		/* Try enabling A20 through the keyboard controller */
-		empty_8042();
+		kbc_err = empty_8042();

 		if (a20_test_short())
 			return 0; /* BIOS worked, but with delayed reaction */

-		enable_a20_kbc();
-		if (a20_test_long())
-			return 0;
+		if (!kbc_err) {
+			enable_a20_kbc();
+			if (a20_test_long())
+				return 0;
+		}

 		/* Finally, try enabling the "fast A20 gate" */
 		enable_a20_fast();
 		if (a20_test_long())
 			return 0;
 	}

 	return -1;
-#endif
 }
@@ -302,9 +302,6 @@ void probe_cards(int unsafe);
 /* video-vesa.c */
 void vesa_store_edid(void);

-/* voyager.c */
-int query_voyager(void);
-
 #endif /* __ASSEMBLY__ */

 #endif /* BOOT_BOOT_H */
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #

-targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o

 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
 ifdef CONFIG_RELOCATABLE
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
 	$(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
+	$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
+	$(call if_changed,lzma)
 else
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
 endif
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T

 else

 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)

 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
 endif

-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+suffix_$(CONFIG_KERNEL_GZIP)  = gz
+suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+suffix_$(CONFIG_KERNEL_LZMA)  = lzma
+
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
 	$(call if_changed,ld)
@@ -25,14 +25,12 @@

 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>

 	.section ".text.head","ax",@progbits
-	.globl startup_32
-
-startup_32:
+ENTRY(startup_32)
 	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
@@ -113,6 +111,8 @@ startup_32:
  */
 	leal relocated(%ebx), %eax
 	jmp *%eax
+ENDPROC(startup_32)
+
 .section ".text"
 relocated:
@@ -26,8 +26,8 @@

 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
@@ -35,9 +35,7 @@

 	.section ".text.head"
 	.code32
-	.globl startup_32
-
-startup_32:
+ENTRY(startup_32)
 	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
@@ -176,6 +174,7 @@ startup_32:

 	/* Jump from 32bit compatibility mode into 64bit mode. */
 	lret
+ENDPROC(startup_32)

 no_longmode:
 	/* This isn't an x86-64 CPU so hang */
@@ -295,7 +294,6 @@ relocated:
 	call	decompress_kernel
 	popq	%rsi

-
 /*
  * Jump to the decompressed kernel.
  */
@@ -116,71 +116,13 @@
 /*
  * gzip declarations
  */
-
-#define OF(args)	args
 #define STATIC		static

 #undef memset
 #undef memcpy
 #define memzero(s, n)	memset((s), 0, (n))

-typedef unsigned char	uch;
-typedef unsigned short	ush;
-typedef unsigned long	ulg;
-
-/*
- * Window size must be at least 32k, and a power of two.
- * We don't actually have a window just a huge output buffer,
- * so we report a 2G window size, as that should always be
- * larger than our output buffer:
- */
-#define WSIZE		0x80000000
-
-/* Input buffer: */
-static unsigned char	*inbuf;
-
-/* Sliding window buffer (and final output buffer): */
-static unsigned char	*window;
-
-/* Valid bytes in inbuf: */
-static unsigned		insize;
-
-/* Index of next byte to be processed in inbuf: */
-static unsigned		inptr;
-
-/* Bytes in output buffer: */
-static unsigned		outcnt;
-
-/* gzip flag byte */
-#define ASCII_FLAG	0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION	0x02 /* bit 1 set: continuation of multi-part gz file */
-#define EXTRA_FIELD	0x04 /* bit 2 set: extra field present */
-#define ORIG_NAM	0x08 /* bit 3 set: original file name present */
-#define COMMENT		0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED	0x20 /* bit 5 set: file is encrypted */
-#define RESERVED	0xC0 /* bit 6, 7: reserved */
-
-#define get_byte()	(inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
-#  define Trace(x)	do { fprintf x; } while (0)
-#  define Tracev(x)	do { if (verbose) fprintf x ; } while (0)
-#  define Tracevv(x)	do { if (verbose > 1) fprintf x ; } while (0)
-#  define Tracec(c, x)	do { if (verbose && (c)) fprintf x ; } while (0)
-#  define Tracecv(c, x)	do { if (verbose > 1 && (c)) fprintf x ; } while (0)
-#else
-#  define Assert(cond, msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c, x)
-#  define Tracecv(c, x)
-#endif
-
-static int fill_inbuf(void);
-static void flush_window(void);
 static void error(char *m);

 /*
@ -189,13 +131,8 @@ static void error(char *m);
|
|||
static struct boot_params *real_mode; /* Pointer to real-mode data */
|
||||
static int quiet;
|
||||
|
||||
extern unsigned char input_data[];
|
||||
extern int input_len;
|
||||
|
||||
static long bytes_out;
|
||||
|
||||
static void *memset(void *s, int c, unsigned n);
|
||||
static void *memcpy(void *dest, const void *src, unsigned n);
|
||||
void *memcpy(void *dest, const void *src, unsigned n);
|
||||
|
||||
static void __putstr(int, const char *);
|
||||
#define putstr(__x) __putstr(0, __x)
|
||||
|
@ -213,7 +150,17 @@ static char *vidmem;
|
|||
static int vidport;
|
||||
static int lines, cols;
|
||||
|
||||
#include "../../../../lib/inflate.c"
|
||||
#ifdef CONFIG_KERNEL_GZIP
|
||||
#include "../../../../lib/decompress_inflate.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_BZIP2
|
||||
#include "../../../../lib/decompress_bunzip2.c"
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KERNEL_LZMA
|
||||
#include "../../../../lib/decompress_unlzma.c"
|
||||
#endif
|
||||
|
||||
static void scroll(void)
|
||||
{
|
||||
|
@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
|
|||
return s;
|
||||
}
|
||||
|
||||
static void *memcpy(void *dest, const void *src, unsigned n)
|
||||
void *memcpy(void *dest, const void *src, unsigned n)
|
||||
{
|
||||
int i;
|
||||
const char *s = src;
|
||||
|
@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
|
|||
return dest;
|
||||
}
|
||||
|
||||
/* ===========================================================================
|
||||
* Fill the input buffer. This is called only when the buffer is empty
|
||||
* and at least one byte is really needed.
|
||||
*/
|
||||
static int fill_inbuf(void)
|
||||
{
|
||||
error("ran out of input data");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ===========================================================================
|
||||
* Write the output window window[0..outcnt-1] and update crc and bytes_out.
|
||||
* (Used for the decompressed data only.)
|
||||
*/
|
||||
static void flush_window(void)
|
||||
{
|
||||
/* With my window equal to my output buffer
|
||||
* I only need to compute the crc here.
|
||||
*/
|
||||
unsigned long c = crc; /* temporary variable */
|
||||
unsigned n;
|
||||
unsigned char *in, ch;
|
||||
|
||||
in = window;
|
||||
for (n = 0; n < outcnt; n++) {
|
||||
ch = *in++;
|
||||
c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
|
||||
}
|
||||
crc = c;
|
||||
bytes_out += (unsigned long)outcnt;
|
||||
outcnt = 0;
|
||||
}
|
||||
|
||||
static void error(char *x)
|
||||
{
|
||||
|
@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
|
|||
lines = real_mode->screen_info.orig_video_lines;
|
||||
cols = real_mode->screen_info.orig_video_cols;
|
||||
|
||||
window = output; /* Output buffer (Normally at 1M) */
|
||||
free_mem_ptr = heap; /* Heap */
|
||||
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
|
||||
inbuf = input_data; /* Input buffer */
|
||||
insize = input_len;
|
||||
inptr = 0;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
if ((unsigned long)output & (__KERNEL_ALIGN - 1))
|
||||
|
@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
|
|||
#endif
|
||||
#endif
|
||||
|
||||
makecrc();
|
||||
if (!quiet)
|
||||
putstr("\nDecompressing Linux... ");
|
||||
gunzip();
|
||||
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
|
||||
parse_elf(output);
|
||||
if (!quiet)
|
||||
putstr("done.\nBooting the kernel.\n");
|
||||
|
|
|
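The hunks above swap the open-coded gzip path (makecrc()/gunzip()) for the kernel's generic decompressor entry point, with the concrete backend chosen by which CONFIG_KERNEL_* decompressor file gets included (lib/decompress_inflate.c and friends each provide the function under the name decompress). A minimal sketch of the call shape, assuming the standard decompress_fn signature; the stub and helper names here are mine, not part of the patch:

/* Stand-in conforming to the decompress_fn shape from
 * include/linux/decompress/generic.h; a real backend replaces it. */
static int decompress(unsigned char *inbuf, int len,
		      int (*fill)(void *, unsigned int),
		      int (*flush)(void *, unsigned int),
		      unsigned char *output, int *pos,
		      void (*error)(char *x))
{
	(void)inbuf; (void)len; (void)fill; (void)flush;
	(void)output; (void)pos; (void)error;
	return 0;
}

static void error_stub(char *m) { (void)m; /* hang or report, as in misc.c */ }

static void run_decompressor(unsigned char *in, int in_len, unsigned char *out)
{
	/* NULL fill/flush: the whole compressed image is already in memory
	 * and the output is one flat buffer, so no streaming callbacks */
	decompress(in, in_len, NULL, NULL, out, NULL, error_stub);
}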
@@ -8,6 +8,8 @@
*
* ----------------------------------------------------------------------- */

#include <linux/linkage.h>

/*
* Memory copy routines
*/

@@ -15,9 +17,7 @@
.code16gcc
.text

.globl memcpy
.type memcpy, @function
memcpy:
GLOBAL(memcpy)
pushw %si
pushw %di
movw %ax, %di

@@ -31,11 +31,9 @@ memcpy:
popw %di
popw %si
ret
.size memcpy, .-memcpy
ENDPROC(memcpy)

.globl memset
.type memset, @function
memset:
GLOBAL(memset)
pushw %di
movw %ax, %di
movzbl %dl, %eax

@@ -48,52 +46,42 @@ memset:
rep; stosb
popw %di
ret
.size memset, .-memset
ENDPROC(memset)

.globl copy_from_fs
.type copy_from_fs, @function
copy_from_fs:
GLOBAL(copy_from_fs)
pushw %ds
pushw %fs
popw %ds
call memcpy
popw %ds
ret
.size copy_from_fs, .-copy_from_fs
ENDPROC(copy_from_fs)

.globl copy_to_fs
.type copy_to_fs, @function
copy_to_fs:
GLOBAL(copy_to_fs)
pushw %es
pushw %fs
popw %es
call memcpy
popw %es
ret
.size copy_to_fs, .-copy_to_fs
ENDPROC(copy_to_fs)

#if 0 /* Not currently used, but can be enabled as needed */

.globl copy_from_gs
.type copy_from_gs, @function
copy_from_gs:
GLOBAL(copy_from_gs)
pushw %ds
pushw %gs
popw %ds
call memcpy
popw %ds
ret
.size copy_from_gs, .-copy_from_gs
.globl copy_to_gs
ENDPROC(copy_from_gs)

.type copy_to_gs, @function
copy_to_gs:
GLOBAL(copy_to_gs)
pushw %es
pushw %gs
popw %es
call memcpy
popw %es
ret
.size copy_to_gs, .-copy_to_gs

ENDPROC(copy_to_gs)
#endif
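This file's hunks replace the hand-written .globl/.type/.size triples with GLOBAL()/ENTRY()/ENDPROC() from <linux/linkage.h>. As a reading aid, this is roughly what those assembler-visible macros expand to in kernels of this era (my paraphrase; see linkage.h for the authoritative definitions):

#define GLOBAL(name)	\
	.globl name;	\
	name:

#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name:

#define END(name)	\
	.size name, .-name

#define ENDPROC(name)	\
	.type name, @function; \
	END(name)

The net effect is purely mechanical: every symbol keeps its .globl visibility, and ENDPROC additionally records type and size, so debuggers and objdump see the same metadata as before with less boilerplate per routine.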
@@ -19,7 +19,7 @@
#include <linux/utsrelease.h>
#include <asm/boot.h>
#include <asm/e820.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/setup.h>
#include "boot.h"
#include "offsets.h"
@@ -149,11 +149,6 @@ void main(void)
/* Query MCA information */
query_mca();

/* Voyager */
#ifdef CONFIG_X86_VOYAGER
query_voyager();
#endif

/* Query Intel SpeedStep (IST) information */
query_ist();
@@ -15,18 +15,15 @@
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <linux/linkage.h>

.text

.globl protected_mode_jump
.type protected_mode_jump, @function

.code16

/*
* void protected_mode_jump(u32 entrypoint, u32 bootparams);
*/
protected_mode_jump:
GLOBAL(protected_mode_jump)
movl %edx, %esi # Pointer to boot_params table

xorl %ebx, %ebx

@@ -47,12 +44,10 @@ protected_mode_jump:
.byte 0x66, 0xea # ljmpl opcode
2: .long in_pm32 # offset
.word __BOOT_CS # segment

.size protected_mode_jump, .-protected_mode_jump
ENDPROC(protected_mode_jump)

.code32
.type in_pm32, @function
in_pm32:
GLOBAL(in_pm32)
# Set up data segments for flat 32-bit mode
movl %ecx, %ds
movl %ecx, %es

@@ -78,5 +73,4 @@ in_pm32:
lldt %cx

jmpl *%eax # Jump to the 32-bit entrypoint

.size in_pm32, .-in_pm32
ENDPROC(in_pm32)
@@ -1,40 +0,0 @@
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */

/*
* Get the Voyager config information
*/

#include "boot.h"

int query_voyager(void)
{
u8 err;
u16 es, di;
/* Abuse the apm_bios_info area for this */
u8 *data_ptr = (u8 *)&boot_params.apm_bios_info;

data_ptr[0] = 0xff; /* Flag on config not found(?) */

asm("pushw %%es ; "
"int $0x15 ; "
"setc %0 ; "
"movw %%es, %1 ; "
"popw %%es"
: "=q" (err), "=r" (es), "=D" (di)
: "a" (0xffc0));

if (err)
return -1; /* Not Voyager */

set_fs(es);
copy_from_fs(data_ptr, di, 7); /* Table is 7 bytes apparently */
return 0;
}
(Diff not shown for this file because of its large size.)
(Diff not shown for this file because of its large size.)
@@ -33,8 +33,6 @@
#include <asm/sigframe.h>
#include <asm/sys_ia32.h>

#define DEBUG_SIG 0

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \

@@ -46,78 +44,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
int err = 0;

if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;

/* If you change siginfo_t structure, please make sure that
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
put_user_try {
/* If you change siginfo_t structure, please make sure that
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member. */
put_user_ex(from->si_signo, &to->si_signo);
put_user_ex(from->si_errno, &to->si_errno);
put_user_ex((short)from->si_code, &to->si_code);

if (from->si_code < 0) {
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
} else {
/*
* First 32bits of unions are always present:
* si_pid === si_band === si_tid === si_addr(LS half)
*/
err |= __put_user(from->_sifields._pad[0],
&to->_sifields._pad[0]);
switch (from->si_code >> 16) {
case __SI_FAULT >> 16:
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
/* FALL THROUGH */
default:
case __SI_KILL >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(ptr_to_compat(from->si_ptr),
&to->si_ptr);
break;
/* This is not generated by the kernel as of now. */
case __SI_RT >> 16:
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
if (from->si_code < 0) {
put_user_ex(from->si_pid, &to->si_pid);
put_user_ex(from->si_uid, &to->si_uid);
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
} else {
/*
* First 32bits of unions are always present:
* si_pid === si_band === si_tid === si_addr(LS half)
*/
put_user_ex(from->_sifields._pad[0],
&to->_sifields._pad[0]);
switch (from->si_code >> 16) {
case __SI_FAULT >> 16:
break;
case __SI_CHLD >> 16:
put_user_ex(from->si_utime, &to->si_utime);
put_user_ex(from->si_stime, &to->si_stime);
put_user_ex(from->si_status, &to->si_status);
/* FALL THROUGH */
default:
case __SI_KILL >> 16:
put_user_ex(from->si_uid, &to->si_uid);
break;
case __SI_POLL >> 16:
put_user_ex(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
put_user_ex(from->si_overrun, &to->si_overrun);
put_user_ex(ptr_to_compat(from->si_ptr),
&to->si_ptr);
break;
/* This is not generated by the kernel as of now. */
case __SI_RT >> 16:
case __SI_MESGQ >> 16:
put_user_ex(from->si_uid, &to->si_uid);
put_user_ex(from->si_int, &to->si_int);
break;
}
}
}
} put_user_catch(err);

return err;
}

int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
int err;
int err = 0;
u32 ptr32;

if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT;

err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
get_user_try {
get_user_ex(to->si_signo, &from->si_signo);
get_user_ex(to->si_errno, &from->si_errno);
get_user_ex(to->si_code, &from->si_code);

err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32);
get_user_ex(to->si_pid, &from->si_pid);
get_user_ex(to->si_uid, &from->si_uid);
get_user_ex(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32);
} get_user_catch(err);

return err;
}
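These conversions are the theme of this file's diff: instead of accumulating err |= __put_user(...) with a per-call check for every field, the accesses are batched inside a put_user_try/put_user_catch region (get_user_try/get_user_catch for reads) and faults are collected once through the exception-table machinery. A minimal usage sketch, assuming the x86 <asm/uaccess.h> macros behave as the converted code implies (helper name and field choice are mine):

static int save_two_fields(struct sigcontext_ia32 __user *sc,
			   struct pt_regs *regs)
{
	int err = 0;

	put_user_try {
		put_user_ex(regs->ip, &sc->ip);	/* no per-call error check */
		put_user_ex(regs->sp, &sc->sp);	/* faults go via exception table */
	} put_user_catch(err);			/* err set if any access faulted */

	return err;	/* 0 on success; nonzero after any fault */
}

The caller must still pass access_ok() on the whole range first; the batching only removes the per-access conditional branches from the fast path.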
@@ -142,17 +145,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
struct pt_regs *regs)
{
stack_t uss, uoss;
int ret;
int ret, err = 0;
mm_segment_t seg;

if (uss_ptr) {
u32 ptr;

memset(&uss, 0, sizeof(stack_t));
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
__get_user(ptr, &uss_ptr->ss_sp) ||
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
__get_user(uss.ss_size, &uss_ptr->ss_size))
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
return -EFAULT;

get_user_try {
get_user_ex(ptr, &uss_ptr->ss_sp);
get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
get_user_ex(uss.ss_size, &uss_ptr->ss_size);
} get_user_catch(err);

if (err)
return -EFAULT;
uss.ss_sp = compat_ptr(ptr);
}

@@ -161,10 +170,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
set_fs(seg);
if (ret >= 0 && uoss_ptr) {
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
__put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
__put_user(uoss.ss_size, &uoss_ptr->ss_size))
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
return -EFAULT;

put_user_try {
put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
} put_user_catch(err);

if (err)
ret = -EFAULT;
}
return ret;

@@ -173,75 +188,78 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
/*
* Do a signal return; undo the signal stack.
*/
#define loadsegment_gs(v) load_gs_index(v)
#define loadsegment_fs(v) loadsegment(fs, v)
#define loadsegment_ds(v) loadsegment(ds, v)
#define loadsegment_es(v) loadsegment(es, v)

#define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; })
#define set_user_seg(seg, v) loadsegment_##seg(v)

#define COPY(x) { \
err |= __get_user(regs->x, &sc->x); \
get_user_ex(regs->x, &sc->x); \
}

#define COPY_SEG_CPL3(seg) { \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
regs->seg = tmp | 3; \
}
#define GET_SEG(seg) ({ \
unsigned short tmp; \
get_user_ex(tmp, &sc->seg); \
tmp; \
})

#define COPY_SEG_CPL3(seg) do { \
regs->seg = GET_SEG(seg) | 3; \
} while (0)

#define RELOAD_SEG(seg) { \
unsigned int cur, pre; \
err |= __get_user(pre, &sc->seg); \
savesegment(seg, cur); \
unsigned int pre = GET_SEG(seg); \
unsigned int cur = get_user_seg(seg); \
pre |= 3; \
if (pre != cur) \
loadsegment(seg, pre); \
set_user_seg(seg, pre); \
}

static int ia32_restore_sigcontext(struct pt_regs *regs,
struct sigcontext_ia32 __user *sc,
unsigned int *pax)
{
unsigned int tmpflags, gs, oldgs, err = 0;
unsigned int tmpflags, err = 0;
void __user *buf;
u32 tmp;

/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;

#if DEBUG_SIG
printk(KERN_DEBUG "SIG restore_sigcontext: "
"sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
sc, sc->err, sc->ip, sc->cs, sc->flags);
#endif
get_user_try {
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
RELOAD_SEG(gs);
RELOAD_SEG(fs);
RELOAD_SEG(ds);
RELOAD_SEG(es);

/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
err |= __get_user(gs, &sc->gs);
gs |= 3;
savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
/* Don't touch extended registers */

RELOAD_SEG(fs);
RELOAD_SEG(ds);
RELOAD_SEG(es);
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);

COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip);
/* Don't touch extended registers */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */
regs->orig_ax = -1;

COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
err |= restore_i387_xstate_ia32(buf);

err |= __get_user(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */
regs->orig_ax = -1;
get_user_ex(*pax, &sc->ax);
} get_user_catch(err);

err |= __get_user(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
err |= restore_i387_xstate_ia32(buf);

err |= __get_user(*pax, &sc->ax);
return err;
}

@@ -317,38 +335,36 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
void __user *fpstate,
struct pt_regs *regs, unsigned int mask)
{
int tmp, err = 0;
int err = 0;

savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
savesegment(ds, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
put_user_try {
put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs);
put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs);
put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds);
put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es);

err |= __put_user(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax);
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user(regs->ip, &sc->ip);
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
err |= __put_user(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
put_user_ex(regs->di, &sc->di);
put_user_ex(regs->si, &sc->si);
put_user_ex(regs->bp, &sc->bp);
put_user_ex(regs->sp, &sc->sp);
put_user_ex(regs->bx, &sc->bx);
put_user_ex(regs->dx, &sc->dx);
put_user_ex(regs->cx, &sc->cx);
put_user_ex(regs->ax, &sc->ax);
put_user_ex(current->thread.trap_no, &sc->trapno);
put_user_ex(current->thread.error_code, &sc->err);
put_user_ex(regs->ip, &sc->ip);
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->sp, &sc->sp_at_signal);
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);

err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);

/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
err |= __put_user(current->thread.cr2, &sc->cr2);
/* non-iBCS2 extensions.. */
put_user_ex(mask, &sc->oldmask);
put_user_ex(current->thread.cr2, &sc->cr2);
} put_user_catch(err);

return err;
}

@@ -437,13 +453,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
else
restorer = &frame->retcode;
}
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);

/*
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
put_user_try {
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

/*
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);

if (err)
return -EFAULT;

@@ -462,11 +482,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;

#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
current->comm, current->pid, frame, regs->ip, frame->pretcode);
#endif

return 0;
}

@@ -496,41 +511,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;

err |= __put_user(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
if (err)
return -EFAULT;
put_user_try {
put_user_ex(sig, &frame->sig);
put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);

/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
else
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn);
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
else
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn);
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);

/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
if (err)
return -EFAULT;

@@ -549,10 +563,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;

#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
current->comm, current->pid, frame, regs->ip, frame->pretcode);
#endif

return 0;
}
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
movq %gs:pda_kernelstack, %rsp
addq $(PDA_STACKOFFSET),%rsp
movq PER_CPU_VAR(kernel_stack), %rsp
addq $(KERNEL_STACK_OFFSET),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs, here we enable it straight after entry:

@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq %gs:pda_kernelstack,%rsp
movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
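These entry-path hunks are part of the merge's percpu conversion: the PDA field reached via %gs:pda_kernelstack becomes an ordinary per-CPU variable, and assembly reaches it through PER_CPU_VAR(kernel_stack). On the C side it is plain percpu data; a hedged sketch (the variable name comes from the diff, the helper is mine):

#include <linux/percpu.h>

/* defined arch-side with DEFINE_PER_CPU(unsigned long, kernel_stack) */
DECLARE_PER_CPU(unsigned long, kernel_stack);

static unsigned long kernel_stack_of(int cpu)
{
	/* per_cpu() resolves a given CPU's copy; the entry code above
	 * addresses the local CPU's copy via the %gs segment base */
	return per_cpu(kernel_stack, cpu);
}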
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
savesegment(gs, dump->regs.gs);
dump->regs.gs = get_user_gs(regs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
acpi_noirq = 1;
}

/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4

extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
@@ -1,15 +1,18 @@
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H

#include <linux/pm.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/pm.h>

#include <asm/alternative.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/system.h>
#include <asm/msr.h>

#define ARCH_APICTIMER_STOPS_ON_C3 1

@@ -33,7 +36,13 @@
} while (0)

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif

#ifdef CONFIG_X86_LOCAL_APIC

@@ -41,6 +50,21 @@ extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;

extern int disable_apic;

#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */

static inline void default_inquire_remote_apic(int apicid)
{
if (apic_verbosity >= APIC_DEBUG)
__inquire_remote_apic(apicid);
}

/*
* Basic functions accessing APICs.
*/

@@ -51,7 +75,14 @@ extern int disable_apic;
#define setup_secondary_clock setup_secondary_APIC_clock
#endif

#ifdef CONFIG_X86_VSMP
extern int is_vsmp_box(void);
#else
static inline int is_vsmp_box(void)
{
return 0;
}
#endif
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern void xapic_icr_write(u32, u32);

@@ -71,6 +102,12 @@ static inline u32 native_apic_mem_read(u32 reg)
return *((volatile u32 *)(APIC_BASE + reg));
}

extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);

#ifdef CONFIG_X86_X2APIC
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||

@@ -91,8 +128,32 @@ static inline u32 native_apic_msr_read(u32 reg)
return low;
}

#ifndef CONFIG_X86_32
extern int x2apic;
static inline void native_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}

static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}

static inline void native_x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;

rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}

extern int x2apic, x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);

@@ -110,30 +171,24 @@ static inline int x2apic_enabled(void)
return 0;
}
#else
#define x2apic_enabled() 0
static inline void check_x2apic(void)
{
}
static inline void enable_x2apic(void)
{
}
static inline void enable_IR_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}
#endif

struct apic_ops {
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
};

extern struct apic_ops *apic_ops;

#define apic_read (apic_ops->read)
#define apic_write (apic_ops->write)
#define apic_icr_read (apic_ops->icr_read)
#define apic_icr_write (apic_ops->icr_write)
#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)

extern int get_physical_broadcast(void);

#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_X2APIC
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */

@@ -141,18 +196,6 @@ static inline void ack_x2APIC_irq(void)
}
#endif

static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/

/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}

extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -196,4 +239,327 @@ static inline void disable_local_APIC(void) { }

#endif /* !CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_64
#define SET_APIC_ID(x) (apic->set_apic_id(x))
#else

#endif

/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct apic {
char *name;

int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
int (*apic_id_registered)(void);

u32 irq_delivery_mode;
u32 irq_dest_mode;

const struct cpumask *(*target_cpus)(void);

int disable_esr;

int dest_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
unsigned long (*check_apicid_present)(int apicid);

void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);

physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);

void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
int (*phys_pkg_id)(int cpuid_apic, int index_msb);

/*
* When one of the next two hooks returns 1 the apic
* is switched to this. Essentially they are additional
* probe functions:
*/
int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);

unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;

unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
const struct cpumask *andmask);

/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);

/* wakeup_secondary_cpu */
int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);

int trampoline_phys_low;
int trampoline_phys_high;

void (*wait_for_init_deassert)(atomic_t *deassert);
void (*smp_callin_clear_local_apic)(void);
void (*inquire_remote_apic)(int apicid);

/* apic ops */
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
};

/*
* Pointer to the local APIC driver in use on this system (there's
* always just one such driver in use - the kernel decides via an
* early probing process which one it picks - and then sticks to it):
*/
extern struct apic *apic;

/*
* APIC functionality to boot other CPUs - only used on SMP:
*/
#ifdef CONFIG_SMP
extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif

static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
}

static inline void apic_write(u32 reg, u32 val)
{
apic->write(reg, val);
}

static inline u64 apic_icr_read(void)
{
return apic->icr_read();
}

static inline void apic_icr_write(u32 low, u32 high)
{
apic->icr_write(low, high);
}

static inline void apic_wait_icr_idle(void)
{
apic->wait_icr_idle();
}

static inline u32 safe_apic_wait_icr_idle(void)
{
return apic->safe_wait_icr_idle();
}

static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/

/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}

static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));

if (APIC_XAPIC(ver))
return (x >> 24) & 0xFF;
else
return (x >> 24) & 0x0F;
}

/*
* Warm reset vector default position:
*/
#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469

#ifdef CONFIG_X86_64
extern struct apic apic_flat;
extern struct apic apic_physflat;
extern struct apic apic_x2apic_cluster;
extern struct apic apic_x2apic_phys;
extern int default_acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);

extern struct apic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);

extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

static inline void default_wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
cpu_relax();
return;
}

extern void generic_bigsmp_probe(void);

#ifdef CONFIG_X86_LOCAL_APIC

#include <asm/smp.h>

#define APIC_DFR_VALUE (APIC_DFR_FLAT)

static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_mask;
#else
return cpumask_of(0);
#endif
}

DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

static inline unsigned int read_apic_id(void)
{
unsigned int reg;

reg = apic_read(APIC_ID);

return apic->get_apic_id(reg);
}

extern void default_setup_apic_routing(void);

#ifdef CONFIG_X86_32
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
extern void default_init_apic_ldr(void);

static inline int default_apic_id_registered(void)
{
return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
}

static inline unsigned int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
unsigned long mask1 = cpumask_bits(cpumask)[0];
unsigned long mask2 = cpumask_bits(andmask)[0];
unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];

return (unsigned int)(mask1 & mask2 & mask3);
}

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}

extern int default_apicid_to_node(int logical_apicid);

#endif

static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);
}

static inline unsigned long default_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}

static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
{
return phys_map;
}

/* Mapping from cpu number to logical apicid */
static inline int default_cpu_to_logical_apicid(int cpu)
{
return 1 << cpu;
}

static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}

static inline int
__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
}

#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
return __default_cpu_present_to_apicid(mps_cpu);
}

static inline int
default_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif

static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_32
extern u8 cpu_2_logical_apicid[NR_CPUS];
#endif

#endif /* _ASM_X86_APIC_H */
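The big apic.h hunk folds the old struct apic_ops plus the scattered genapic hooks into one struct apic driver object, selected once at early probe time through the global apic pointer that the inline wrappers above dispatch through. A hedged sketch of how a driver instance plugs in (field names come from the struct in the diff; the "dummy" driver itself is made up for illustration):

static u32 dummy_apic_read(u32 reg)
{
	(void)reg;
	return 0;
}

static void dummy_apic_write(u32 reg, u32 v)
{
	(void)reg; (void)v;
}

static struct apic apic_dummy = {
	.name	= "dummy",
	.read	= dummy_apic_read,
	.write	= dummy_apic_write,
	/* remaining hooks left unset for brevity; a real driver fills them */
};

/* early probe code picks exactly one implementation and sticks to it */
struct apic *apic = &apic_dummy;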
@@ -0,0 +1,12 @@
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H

/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif

#endif /* _ASM_X86_APICNUM_H */
@@ -1,26 +0,0 @@
#ifndef _ASM_X86_ARCH_HOOKS_H
#define _ASM_X86_ARCH_HOOKS_H

#include <linux/interrupt.h>

/*
* linux/include/asm/arch_hooks.h
*
* define the architecture specific hooks
*/

/* these aren't arch hooks, they are generic routines
* that can be used by the hooks */
extern void init_ISA_irqs(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);

/* these are the defined hooks */
extern void intr_init_hook(void);
extern void pre_intr_init_hook(void);
extern void pre_setup_arch_hook(void);
extern void trap_init_hook(void);
extern void pre_time_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);

#endif /* _ASM_X86_ARCH_HOOKS_H */
@@ -1,155 +0,0 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H

#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)

static inline int apic_id_registered(void)
{
return (1);
}

static inline const cpumask_t *target_cpus(void)
{
#ifdef CONFIG_SMP
return &cpu_online_map;
#else
return &cpumask_of_cpu(0);
#endif
}

#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target proc */
#define NO_BALANCE_IRQ (0)

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return (0);
}

static inline unsigned long check_apicid_present(int bit)
{
return (1);
}

static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
id = xapic_phys_to_log_apicid(cpu);
val |= SET_APIC_LOGICAL_ID(id);
return val;
}

/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr(void)
{
unsigned long val;
int cpu = smp_processor_id();

apic_write(APIC_DFR, APIC_DFR_VALUE);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}

static inline void setup_apic_routing(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Physflat", nr_ioapics);
}

static inline int multi_timer_check(int apic, int irq)
{
return (0);
}

static inline int apicid_to_node(int logical_apicid)
{
return apicid_2_node[hard_smp_processor_id()];
}

static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < nr_cpu_ids)
return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

return BAD_APICID;
}

static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}

extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
if (cpu >= nr_cpu_ids)
return BAD_APICID;
return cpu_physical_id(cpu);
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return physids_promote(0xFFL);
}

static inline void setup_portio_remap(void)
{
}

static inline void enable_apic_mode(void)
{
}

static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return (1);
}

/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
int cpu;
int apicid;

cpu = first_cpu(*cpumask);
apicid = cpu_to_logical_apicid(cpu);
return apicid;
}

static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;

/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
for_each_cpu_and(cpu, cpumask, andmask)
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
if (cpu < nr_cpu_ids)
return cpu_to_logical_apicid(cpu);

return BAD_APICID;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}

#endif /* __ASM_MACH_APIC_H */
@@ -1,13 +0,0 @@
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H

#define APIC_ID_MASK (0xFF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0xFF);
}

#define GET_APIC_ID(x) get_apic_id(x)

#endif
@@ -1,22 +0,0 @@
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H

void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_mask, vector);
}

#endif /* __ASM_MACH_IPI_H */
@@ -10,17 +10,31 @@
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */

#ifdef __KERNEL__

/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))

#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#else /* !CONFIG_KERNEL_BZIP2 */

#ifdef CONFIG_X86_64
#define BOOT_HEAP_SIZE 0x7000
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_HEAP_SIZE 0x4000
#endif

#endif /* !CONFIG_KERNEL_BZIP2 */

#ifdef CONFIG_X86_64
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_STACK_SIZE 0x1000
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_BOOT_H */
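The LOAD_PHYSICAL_ADDR macro above is the usual round-up-to-alignment idiom. A small worked example with made-up values for the two CONFIG knobs:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x123456;	/* hypothetical CONFIG_PHYSICAL_START */
	unsigned long align = 0x200000;	/* hypothetical CONFIG_PHYSICAL_ALIGN (2 MiB) */
	unsigned long addr = (start + (align - 1)) & ~(align - 1);

	printf("%#lx\n", addr);		/* prints 0x200000 */
	return 0;
}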
@@ -5,24 +5,43 @@
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr,
unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
unsigned long end) { }

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, const void *src,
unsigned long len)
{
memcpy(dst, src, len);
}

#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
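The whole cacheflush.h hunk is a macro-to-static-inline conversion: the functions still compile to nothing on x86, but arguments are now type-checked and evaluated exactly once. A self-contained illustration of the difference (types simplified; these are not the kernel's definitions):

struct page { int dummy; };

/* old style: the argument is discarded unexamined */
#define flush_dcache_page_macro(page) do { } while (0)

/* new style: still a no-op, but callers must really pass a struct page * */
static inline void flush_dcache_page_inline(struct page *page) { (void)page; }

int main(void)
{
	struct page pg = { 0 };

	flush_dcache_page_macro("anything goes");	/* silently accepted */
	flush_dcache_page_inline(&pg);			/* type-checked no-op */
	return 0;
}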
@@ -1,5 +1,55 @@
/*
 * Some macros to handle stack frames in assembly.

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

 */


/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

#define R15             0

@@ -9,7 +59,7 @@
#define RBP             32
#define RBX             40

/* arguments: interrupts/non tracing syscalls only save upto here*/
/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11             48
#define R10             56
#define R9              64

@@ -22,7 +72,7 @@
#define ORIG_RAX        120       /* + error_code */
/* end of arguments */

/* cpu exception frame or undefined in case of fast syscall. */
/* cpu exception frame or undefined in case of fast syscall: */
#define RIP             128
#define CS              136
#define EFLAGS          144
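
For readers checking this against compiler output, here is a minimal C sketch of
the 64-bit convention documented in the comment above (illustrative only, not
part of this patch; the function names are hypothetical):

/* Sketch: how gcc applies the 64-bit calling convention described above. */
struct pair { long a, b; };             /* <= 128 bits: returned in rax:rdx */

long add3(long x, long y, long z)       /* x in rdi, y in rsi, z in rdx */
{
        return x + y + z;               /* result in rax */
}

struct pair swapped(long x, long y)     /* x in rdi, y in rsi */
{
        struct pair p = { y, x };       /* p.a in rax, p.b in rdx */
        return p;
}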
@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>

#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu)                    boot_cpu_physical_apicid
#define safe_smp_processor_id()                 0
#define stack_smp_processor_id()                0

#endif /* CONFIG_SMP */

struct x86_cpu {
        struct cpu cpu;
};

@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
#endif

DECLARE_PER_CPU(int, cpu_state);

extern unsigned int boot_cpu_id;

#endif /* _ASM_X86_CPU_H */
@@ -0,0 +1,32 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask         ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask        ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask    ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask  ((struct cpumask *)&cpu_sibling_setup_map)

static inline void setup_cpu_local_masks(void) { }

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
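
The casts in the 32-bit branch exist so that callers can use the modern
struct cpumask API identically on both configurations; a brief sketch of the
kind of caller this enables (hypothetical function names, assuming the usual
cpumask helpers):

/* Works on both 32-bit (cpumask_t + cast) and 64-bit (cpumask_var_t): */
void mark_cpu_called_in(int cpu)
{
        cpumask_set_cpu(cpu, cpu_callin_mask);          /* set this CPU's bit */
}

bool cpu_called_in(int cpu)
{
        return cpumask_test_cpu(cpu, cpu_callin_mask);  /* test the bit */
}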
@@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H

#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>

#ifndef __ASSEMBLY__
struct task_struct;

DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
        return x86_read_percpu(current_task);
}

#else /* X86_32 */

#ifndef __ASSEMBLY__
#include <asm/pda.h>

struct task_struct;

static __always_inline struct task_struct *get_current(void)
{
        return read_pda(pcurrent);
        return percpu_read(current_task);
}

#else /* __ASSEMBLY__ */

#include <asm/asm-offsets.h>
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg

#endif /* __ASSEMBLY__ */

#endif /* X86_32 */

#define current get_current()

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_CURRENT_H */
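
With this change get_current() resolves to the same single per-cpu read on
both 32- and 64-bit; conceptually (a sketch, not part of the patch):

/* Any kernel-context code can then simply do, e.g.: */
void report_current(void)
{
        struct task_struct *tsk = current;      /* one segment-relative load */

        printk(KERN_DEBUG "pid=%d comm=%s\n", tsk->pid, tsk->comm);
}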
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
 * now struct_user_regs, they are different)
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)        \
#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
do {                                            \
        pr_reg[0] = regs->bx;                   \
        pr_reg[1] = regs->cx;                   \

@@ -124,7 +124,6 @@ do {                                           \
        pr_reg[7] = regs->ds & 0xffff;          \
        pr_reg[8] = regs->es & 0xffff;          \
        pr_reg[9] = regs->fs & 0xffff;          \
        savesegment(gs, pr_reg[10]);            \
        pr_reg[11] = regs->orig_ax;             \
        pr_reg[12] = regs->ip;                  \
        pr_reg[13] = regs->cs & 0xffff;         \

@@ -133,6 +132,18 @@ do {                                          \
        pr_reg[16] = regs->ss & 0xffff;         \
} while (0);

#define ELF_CORE_COPY_REGS(pr_reg, regs)        \
do {                                            \
        ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
        pr_reg[10] = get_user_gs(regs);         \
} while (0);

#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
do {                                            \
        ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
        savesegment(gs, pr_reg[10]);            \
} while (0);

#define ELF_PLATFORM    (utsname()->machine)
#define set_personality_64bit() do { } while (0)
@@ -9,12 +9,28 @@
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)

BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
                 smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
                 smp_invalidate_interrupt)
#endif

/*

@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 * a much simpler SMP time architecture:
 */
#ifdef CONFIG_X86_LOCAL_APIC

BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)

#ifdef CONFIG_PERF_COUNTERS
BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
#endif

#ifdef CONFIG_X86_MCE_P4THERMAL
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
#endif
@@ -1,242 +0,0 @@
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)

static inline int apic_id_registered(void)
{
        return (1);
}

static inline const cpumask_t *target_cpus_cluster(void)
{
        return &CPU_MASK_ALL;
}

static inline const cpumask_t *target_cpus(void)
{
        return &cpumask_of_cpu(smp_processor_id());
}

#define APIC_DFR_VALUE_CLUSTER          (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER       (dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER           (1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER          (1)

#define APIC_DFR_VALUE          (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE       (dest_Fixed)
#define INT_DEST_MODE           (0)    /* phys delivery to target procs */
#define NO_BALANCE_IRQ          (0)
#undef  APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL       0x0

static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
        return 0;
}
static inline unsigned long check_apicid_present(int bit)
{
        return physid_isset(bit, phys_cpu_present_map);
}

#define apicid_cluster(apicid) (apicid & 0xF0)

static inline unsigned long calculate_ldr(int cpu)
{
        unsigned long id;
        id = xapic_phys_to_log_apicid(cpu);
        return (SET_APIC_LOGICAL_ID(id));
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LdR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
        unsigned long val;
        int cpu = smp_processor_id();

        apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
        val = calculate_ldr(cpu);
        apic_write(APIC_LDR, val);
}

static inline void init_apic_ldr(void)
{
        unsigned long val;
        int cpu = smp_processor_id();

        apic_write(APIC_DFR, APIC_DFR_VALUE);
        val = calculate_ldr(cpu);
        apic_write(APIC_LDR, val);
}

extern int apic_version [MAX_APICS];
static inline void setup_apic_routing(void)
{
        int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
        printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
               (apic_version[apic] == 0x14) ?
                "Physical Cluster" : "Logical Cluster",
               nr_ioapics, cpus_addr(*target_cpus())[0]);
}

static inline int multi_timer_check(int apic, int irq)
{
        return 0;
}

static inline int apicid_to_node(int logical_apicid)
{
        return 0;
}


static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (!mps_cpu)
                return boot_cpu_physical_apicid;
        else if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
}

static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
        static int id = 0;
        physid_mask_t mask;
        mask = physid_mask_of_physid(id);
        ++id;
        return mask;
}

extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
        if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return (int)cpu_2_logical_apicid[cpu];
#else
        return logical_smp_processor_id();
#endif
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
        /* For clustered we don't have a good way to do this yet - hack */
        return physids_promote(0xff);
}


static inline void setup_portio_remap(void)
{
}

extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
        boot_cpu_physical_apicid = read_apic_id();
        return (1);
}

static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid;

        num_bits_set = cpumask_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == nr_cpu_ids)
                return 0xFF;
        /*
         * The cpus in the mask must all be on the apic cluster.  If are not
         * on the same apicid cluster return default value of TARGET_CPUS.
         */
        cpu = cpumask_first(cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        while (cpus_found < num_bits_set) {
                if (cpumask_test_cpu(cpu, cpumask)) {
                        int new_apicid = cpu_to_logical_apicid(cpu);
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
                                return 0xFF;
                        }
                        apicid = new_apicid;
                        cpus_found++;
                }
                cpu++;
        }
        return apicid;
}

static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid;

        num_bits_set = cpus_weight(*cpumask);
        /* Return id to all */
        if (num_bits_set == nr_cpu_ids)
                return cpu_to_logical_apicid(0);
        /*
         * The cpus in the mask must all be on the apic cluster.  If are not
         * on the same apicid cluster return default value of TARGET_CPUS.
         */
        cpu = first_cpu(*cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        while (cpus_found < num_bits_set) {
                if (cpu_isset(cpu, *cpumask)) {
                        int new_apicid = cpu_to_logical_apicid(cpu);
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
                                return cpu_to_logical_apicid(0);
                        }
                        apicid = new_apicid;
                        cpus_found++;
                }
                cpu++;
        }
        return apicid;
}


static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
                                                  const struct cpumask *andmask)
{
        int apicid = cpu_to_logical_apicid(0);
        cpumask_var_t cpumask;

        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
                return apicid;

        cpumask_and(cpumask, inmask, andmask);
        cpumask_and(cpumask, cpumask, cpu_online_mask);
        apicid = cpu_mask_to_apicid(cpumask);

        free_cpumask_var(cpumask);
        return apicid;
}

static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

#endif /* __ASM_ES7000_APIC_H */
@@ -1,13 +0,0 @@
#ifndef __ASM_ES7000_APICDEF_H
#define __ASM_ES7000_APICDEF_H

#define         APIC_ID_MASK            (0xFF<<24)

static inline unsigned get_apic_id(unsigned long x)
{
        return (((x)>>24)&0xFF);
}

#define         GET_APIC_ID(x)  get_apic_id(x)

#endif
@@ -1,22 +0,0 @@
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H

void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

static inline void send_IPI_mask(const struct cpumask *mask, int vector)
{
        send_IPI_mask_sequence(mask, vector);
}

static inline void send_IPI_allbutself(int vector)
{
        send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static inline void send_IPI_all(int vector)
{
        send_IPI_mask(cpu_online_mask, vector);
}

#endif /* __ASM_ES7000_IPI_H */
@@ -1,29 +0,0 @@
#ifndef __ASM_ES7000_MPPARSE_H
#define __ASM_ES7000_MPPARSE_H

#include <linux/acpi.h>

extern int parse_unisys_oem (char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
extern void setup_unisys(void);

#ifndef CONFIG_X86_GENERICARCH
extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
#endif

#ifdef CONFIG_ACPI

static inline int es7000_check_dsdt(void)
{
        struct acpi_table_header header;

        if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
            !strncmp(header.oem_id, "UNISYS", 6))
                return 1;
        return 0;
}
#endif

#endif /* __ASM_MACH_MPPARSE_H */
@@ -1,37 +0,0 @@
#ifndef __ASM_ES7000_WAKECPU_H
#define __ASM_ES7000_WAKECPU_H

#define TRAMPOLINE_PHYS_LOW     0x467
#define TRAMPOLINE_PHYS_HIGH    0x469

static inline void wait_for_init_deassert(atomic_t *deassert)
{
#ifndef CONFIG_ES7000_CLUSTERED_APIC
        while (!atomic_read(deassert))
                cpu_relax();
#endif
        return;
}

/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}

static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}

static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}

extern void __inquire_remote_apic(int apicid);

static inline void inquire_remote_apic(int apicid)
{
        if (apic_verbosity >= APIC_DEBUG)
                __inquire_remote_apic(apicid);
}

#endif /* __ASM_MACH_WAKECPU_H */
@@ -1,12 +1,146 @@
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
 */

#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_X86_32
# include "fixmap_32.h"
#include <linux/threads.h>
#include <asm/kmap_types.h>
#else
# include "fixmap_64.h"
#include <asm/vsyscall.h>
#endif

/*
 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
 * uses fixmaps that relies on FIXADDR_TOP for proper address calculation.
 * Because of this, FIXADDR_TOP x86 integration was left as later work.
 */
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP     ((unsigned long)__FIXADDR_TOP)

#define FIXADDR_USER_START     __fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END       __fix_to_virt(FIX_VDSO - 1)
#else
#define FIXADDR_TOP     (VSYSCALL_END-PAGE_SIZE)

/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START      ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END        (FIXADDR_USER_START + PAGE_SIZE)
#endif


/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process.
 * for x86_32: We allocate these special addresses
 * from the end of virtual memory (0xfffff000) backwards.
 * Also this lets us do fail-safe vmalloc(), we
 * can guarantee that these special addresses and
 * vmalloc()-ed addresses never overlap.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
enum fixed_addresses {
#ifdef CONFIG_X86_32
        FIX_HOLE,
        FIX_VDSO,
#else
        VSYSCALL_LAST_PAGE,
        VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
                            + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
        VSYSCALL_HPET,
#endif
        FIX_DBGP_BASE,
        FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
        FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
        FIX_CO_CPU,     /* Cobalt timer */
        FIX_CO_APIC,    /* Cobalt APIC Redirection Table */
        FIX_LI_PCIA,    /* Lithium PCI Bridge A */
        FIX_LI_PCIB,    /* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
        FIX_F00F_IDT,   /* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
        FIX_CYCLONE_TIMER, /*cyclone timer register*/
#endif
#ifdef CONFIG_X86_32
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
        FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
        FIX_PARAVIRT_BOOTMAP,
#endif
        __end_of_permanent_fixed_addresses,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
#endif
        /*
         * 256 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
         * We round it up to the next 256 pages boundary so that we
         * can have a single pgd entry and a single pte table:
         */
#define NR_FIX_BTMAPS           64
#define FIX_BTMAPS_SLOTS        4
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
                        (__end_of_permanent_fixed_addresses & 255),
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
#ifdef CONFIG_X86_32
        FIX_WP_TEST,
#endif
        __end_of_fixed_addresses
};


extern void reserve_top_address(unsigned long reserve);

#define FIXADDR_SIZE    (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_BOOT_SIZE       (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START           (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXADDR_BOOT_START      (FIXADDR_TOP - FIXADDR_BOOT_SIZE)

extern int fixmaps_set;

extern pte_t *kmap_pte;

@@ -69,4 +203,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
        BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
        return __virt_to_fix(vaddr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
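
To make the mechanism the comment above describes concrete, a hedged sketch of
how a fixmap slot is typically used (FIX_EXAMPLE is hypothetical; real code
uses one of the indices from enum fixed_addresses):

/* Sketch: map a known physical page at a compile-time-constant address. */
static void __iomem *map_my_registers(phys_addr_t phys)
{
        set_fixmap_nocache(FIX_EXAMPLE, phys);           /* install the pte */
        return (void __iomem *)fix_to_virt(FIX_EXAMPLE); /* constant address */
}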
@@ -1,119 +0,0 @@
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#ifndef _ASM_X86_FIXMAP_32_H
#define _ASM_X86_FIXMAP_32_H


/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_USER_START     __fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END       __fix_to_virt(FIX_VDSO - 1)

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <asm/kmap_types.h>

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process. We allocate these special addresses
 * from the end of virtual memory (0xfffff000) backwards.
 * Also this lets us do fail-safe vmalloc(), we
 * can guarantee that these special addresses and
 * vmalloc()-ed addresses never overlap.
 *
 * these 'compile-time allocated' memory buffers are
 * fixed-size 4k pages. (or larger if used with an increment
 * highger than 1) use fixmap_set(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
enum fixed_addresses {
        FIX_HOLE,
        FIX_VDSO,
        FIX_DBGP_BASE,
        FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
        FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
        FIX_CO_CPU,     /* Cobalt timer */
        FIX_CO_APIC,    /* Cobalt APIC Redirection Table */
        FIX_LI_PCIA,    /* Lithium PCI Bridge A */
        FIX_LI_PCIB,    /* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
        FIX_F00F_IDT,   /* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
        FIX_CYCLONE_TIMER, /*cyclone timer register*/
#endif
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
        FIX_PCIE_MCFG,
#endif
#ifdef CONFIG_PARAVIRT
        FIX_PARAVIRT_BOOTMAP,
#endif
        __end_of_permanent_fixed_addresses,
        /*
         * 256 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
         * We round it up to the next 256 pages boundary so that we
         * can have a single pgd entry and a single pte table:
         */
#define NR_FIX_BTMAPS           64
#define FIX_BTMAPS_SLOTS        4
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
                        (__end_of_permanent_fixed_addresses & 255),
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
        FIX_WP_TEST,
#ifdef CONFIG_ACPI
        FIX_ACPI_BEGIN,
        FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
#endif
        __end_of_fixed_addresses
};

extern void reserve_top_address(unsigned long reserve);


#define FIXADDR_TOP     ((unsigned long)__FIXADDR_TOP)

#define __FIXADDR_SIZE  (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE     (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START           (FIXADDR_TOP - __FIXADDR_SIZE)
#define FIXADDR_BOOT_START      (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_32_H */
@@ -1,79 +0,0 @@
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Ingo Molnar
 */

#ifndef _ASM_X86_FIXMAP_64_H
#define _ASM_X86_FIXMAP_64_H

#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/vsyscall.h>

/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */

enum fixed_addresses {
        VSYSCALL_LAST_PAGE,
        VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
                            + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
        VSYSCALL_HPET,
        FIX_DBGP_BASE,
        FIX_EARLYCON_MEM_BASE,
        FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#ifdef CONFIG_PARAVIRT
        FIX_PARAVIRT_BOOTMAP,
#endif
        __end_of_permanent_fixed_addresses,
#ifdef CONFIG_ACPI
        FIX_ACPI_BEGIN,
        FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        FIX_OHCI1394_BASE,
#endif
        /*
         * 256 temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         *
         * We round it up to the next 256 pages boundary so that we
         * can have a single pgd entry and a single pte table:
         */
#define NR_FIX_BTMAPS           64
#define FIX_BTMAPS_SLOTS        4
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
                        (__end_of_permanent_fixed_addresses & 255),
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
        __end_of_fixed_addresses
};

#define FIXADDR_TOP     (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE    (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START   (FIXADDR_TOP - FIXADDR_SIZE)

/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START      ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END        (FIXADDR_USER_START + PAGE_SIZE)

#endif /* _ASM_X86_FIXMAP_64_H */
@@ -1,5 +1 @@
#ifdef CONFIG_X86_32
# include "genapic_32.h"
#else
# include "genapic_64.h"
#endif
#include <asm/apic.h>
@@ -1,148 +0,0 @@
#ifndef _ASM_X86_GENAPIC_32_H
#define _ASM_X86_GENAPIC_32_H

#include <asm/mpspec.h>
#include <asm/atomic.h>

/*
 * Generic APIC driver interface.
 *
 * An straight forward mapping of the APIC related parts of the
 * x86 subarchitecture interface to a dynamic object.
 *
 * This is used by the "generic" x86 subarchitecture.
 *
 * Copyright 2003 Andi Kleen, SuSE Labs.
 */

struct mpc_bus;
struct mpc_table;
struct mpc_cpu;

struct genapic {
        char *name;
        int (*probe)(void);

        int (*apic_id_registered)(void);
        const struct cpumask *(*target_cpus)(void);
        int int_delivery_mode;
        int int_dest_mode;
        int ESR_DISABLE;
        int apic_destination_logical;
        unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
        unsigned long (*check_apicid_present)(int apicid);
        int no_balance_irq;
        int no_ioapic_check;
        void (*init_apic_ldr)(void);
        physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);

        void (*setup_apic_routing)(void);
        int (*multi_timer_check)(int apic, int irq);
        int (*apicid_to_node)(int logical_apicid);
        int (*cpu_to_logical_apicid)(int cpu);
        int (*cpu_present_to_apicid)(int mps_cpu);
        physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
        void (*setup_portio_remap)(void);
        int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
        void (*enable_apic_mode)(void);
        u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);

        /* mpparse */
        /* When one of the next two hooks returns 1 the genapic
           is switched to this. Essentially they are additional probe
           functions. */
        int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
                             char *productid);
        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);

        unsigned (*get_apic_id)(unsigned long x);
        unsigned long apic_id_mask;
        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                               const struct cpumask *andmask);
        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);

#ifdef CONFIG_SMP
        /* ipi */
        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
                                         int vector);
        void (*send_IPI_allbutself)(int vector);
        void (*send_IPI_all)(int vector);
#endif
        int (*wakeup_cpu)(int apicid, unsigned long start_eip);
        int trampoline_phys_low;
        int trampoline_phys_high;
        void (*wait_for_init_deassert)(atomic_t *deassert);
        void (*smp_callin_clear_local_apic)(void);
        void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
        void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
        void (*inquire_remote_apic)(int apicid);
};

#define APICFUNC(x) .x = x,

/* More functions could be probably marked IPIFUNC and save some space
   in UP GENERICARCH kernels, but I don't have the nerve right now
   to untangle this mess. -AK  */
#ifdef CONFIG_SMP
#define IPIFUNC(x) APICFUNC(x)
#else
#define IPIFUNC(x)
#endif

#define APIC_INIT(aname, aprobe)                        \
{                                                       \
        .name = aname,                                  \
        .probe = aprobe,                                \
        .int_delivery_mode = INT_DELIVERY_MODE,         \
        .int_dest_mode = INT_DEST_MODE,                 \
        .no_balance_irq = NO_BALANCE_IRQ,               \
        .ESR_DISABLE = esr_disable,                     \
        .apic_destination_logical = APIC_DEST_LOGICAL,  \
        APICFUNC(apic_id_registered)                    \
        APICFUNC(target_cpus)                           \
        APICFUNC(check_apicid_used)                     \
        APICFUNC(check_apicid_present)                  \
        APICFUNC(init_apic_ldr)                         \
        APICFUNC(ioapic_phys_id_map)                    \
        APICFUNC(setup_apic_routing)                    \
        APICFUNC(multi_timer_check)                     \
        APICFUNC(apicid_to_node)                        \
        APICFUNC(cpu_to_logical_apicid)                 \
        APICFUNC(cpu_present_to_apicid)                 \
        APICFUNC(apicid_to_cpu_present)                 \
        APICFUNC(setup_portio_remap)                    \
        APICFUNC(check_phys_apicid_present)             \
        APICFUNC(mps_oem_check)                         \
        APICFUNC(get_apic_id)                           \
        .apic_id_mask = APIC_ID_MASK,                   \
        APICFUNC(cpu_mask_to_apicid)                    \
        APICFUNC(cpu_mask_to_apicid_and)                \
        APICFUNC(vector_allocation_domain)              \
        APICFUNC(acpi_madt_oem_check)                   \
        IPIFUNC(send_IPI_mask)                          \
        IPIFUNC(send_IPI_allbutself)                    \
        IPIFUNC(send_IPI_all)                           \
        APICFUNC(enable_apic_mode)                      \
        APICFUNC(phys_pkg_id)                           \
        .trampoline_phys_low = TRAMPOLINE_PHYS_LOW,     \
        .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,   \
        APICFUNC(wait_for_init_deassert)                \
        APICFUNC(smp_callin_clear_local_apic)           \
        APICFUNC(store_NMI_vector)                      \
        APICFUNC(restore_NMI_vector)                    \
        APICFUNC(inquire_remote_apic)                   \
}

extern struct genapic *genapic;
extern void es7000_update_genapic_to_cluster(void);

enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type()            UV_NONE
#define is_uv_system()                  0
#define uv_wakeup_secondary(a, b)       1
#define uv_system_init()                do {} while (0)


#endif /* _ASM_X86_GENAPIC_32_H */
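
The APICFUNC(x) trick in the file removed above relies on C99 designated
initializers whose field name matches the function's symbol name. A minimal
stand-alone sketch of the same pattern (hypothetical ops structure, not kernel
code):

#include <stdio.h>

struct ops {
        int  (*probe)(void);
        void (*setup)(void);
};

static int  probe(void) { return 1; }
static void setup(void) { puts("setup"); }

#define OPFUNC(x) .x = x,       /* field and symbol share one name */

static struct ops default_ops = {
        OPFUNC(probe)
        OPFUNC(setup)
};

int main(void)
{
        if (default_ops.probe())
                default_ops.setup();
        return 0;
}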
@@ -1,66 +0,0 @@
#ifndef _ASM_X86_GENAPIC_64_H
#define _ASM_X86_GENAPIC_64_H

#include <linux/cpumask.h>

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */

struct genapic {
        char *name;
        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
        u32 int_delivery_mode;
        u32 int_dest_mode;
        int (*apic_id_registered)(void);
        const struct cpumask *(*target_cpus)(void);
        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
        void (*init_apic_ldr)(void);
        /* ipi */
        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
                                         int vector);
        void (*send_IPI_allbutself)(int vector);
        void (*send_IPI_all)(int vector);
        void (*send_IPI_self)(int vector);
        /* */
        unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
        unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                               const struct cpumask *andmask);
        unsigned int (*phys_pkg_id)(int index_msb);
        unsigned int (*get_apic_id)(unsigned long x);
        unsigned long (*set_apic_id)(unsigned int id);
        unsigned long apic_id_mask;
        /* wakeup_secondary_cpu */
        int (*wakeup_cpu)(int apicid, unsigned long start_eip);
};

extern struct genapic *genapic;

extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);

extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);

extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);

extern void setup_apic_routing(void);

#endif /* _ASM_X86_GENAPIC_64_H */
@@ -1,11 +1,52 @@
#ifdef CONFIG_X86_32
# include "hardirq_32.h"
#else
# include "hardirq_64.h"
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
        unsigned int __softirq_pending;
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
#endif
#ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
        unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)    percpu_add(irq_stat.member, 1)

#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)  percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)   percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu       arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat           arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
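
With these percpu helpers an interrupt path bumps its statistics without a
lock and without an explicit smp_processor_id(); a sketch of a typical caller
(hypothetical handler name, not part of this patch):

/* Sketch: how an interrupt path accounts itself via inc_irq_stat(). */
void smp_example_interrupt(struct pt_regs *regs)
{
        inc_irq_stat(irq_resched_count);        /* one percpu_add, no lock */
        /* ... handle the interrupt ... */
}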
@@ -1,30 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_32_H
#define _ASM_X86_HARDIRQ_32_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
        unsigned int __softirq_pending;
        unsigned long idle_timestamp;
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq0_irqs;
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
        unsigned int irq_thermal_count;
        unsigned int irq_spurious_count;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

#define inc_irq_stat(member)    (__get_cpu_var(irq_stat).member++)

void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h>

#endif /* _ASM_X86_HARDIRQ_32_H */
@@ -1,25 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_64_H
#define _ASM_X86_HARDIRQ_64_H

#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT 1

#define inc_irq_stat(member)    add_pda(member, 1)

#define local_softirq_pending() read_pda(__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING 1

#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

#endif /* _ASM_X86_HARDIRQ_64_H */
@@ -25,8 +25,6 @@
#include <asm/irq.h>
#include <asm/sections.h>

#define platform_legacy_irq(irq)        ((irq) < 16)

/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);

@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);

/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;

extern void init_VISWS_APIC_irqs(void);

@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);

#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
#endif

/* IPI functions */
#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
#endif
extern void send_IPI(int dest, int vector);

/* Statistics */
extern atomic_t irq_err_count;

@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);

/* Voyager functions */
extern asmlinkage void vic_cpi_interrupt(void);
extern asmlinkage void vic_sys_interrupt(void);
extern asmlinkage void vic_cmn_interrupt(void);
extern asmlinkage void qic_timer_interrupt(void);
extern asmlinkage void qic_invalidate_interrupt(void);
extern asmlinkage void qic_reschedule_interrupt(void);
extern asmlinkage void qic_enable_irq_interrupt(void);
extern asmlinkage void qic_call_function_interrupt(void);

/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_SMP
#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);
@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);

#ifdef CONFIG_X86_32
extern void init_ISA_irqs(void);
#endif

#endif /* _ASM_X86_I8259_H */
@@ -5,6 +5,7 @@

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \

@@ -80,6 +81,98 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq                   readq
#define writeq                  writeq

/**
 *      virt_to_phys    -       map virtual addresses to physical
 *      @address: address to remap
 *
 *      The returned physical address is the physical (CPU) mapping for
 *      the memory address given. It is only valid to use this function on
 *      addresses directly mapped or allocated via kmalloc.
 *
 *      This function does not give bus mappings for DMA transfers. In
 *      almost all conceivable cases a device driver should not be using
 *      this function
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
        return __pa(address);
}

/**
 *      phys_to_virt    -       map physical address to virtual
 *      @address: address to remap
 *
 *      The returned virtual address is a current CPU mapping for
 *      the memory address given. It is only valid to use this function on
 *      addresses that have a kernel mapping
 *
 *      This function does not handle bus mappings for DMA transfers. In
 *      almost all conceivable cases a device driver should not be using
 *      this function
 */

static inline void *phys_to_virt(phys_addr_t address)
{
        return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
        return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page)   ((unsigned int)page_to_phys(page))
#define isa_bus_to_virt         phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);


#ifdef CONFIG_X86_32
# include "io_32.h"
#else

@@ -91,7 +184,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                unsigned long prot_val);
extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time

@@ -103,7 +196,7 @@ extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

#define IO_SPACE_LIMIT 0xffff

#endif /* _ASM_X86_IO_H */
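
A brief sketch of the ioremap() pattern the kernel-doc above describes
(hypothetical register offsets, not taken from this patch):

/* Sketch: map a device's MMIO window, poke a register, unmap. */
static int example_probe(phys_addr_t bar_phys, unsigned long bar_len)
{
        void __iomem *regs = ioremap(bar_phys, bar_len);

        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x10);       /* 0x10: hypothetical enable reg */
        (void)readl(regs + 0x14);       /* flush posted write (hypothetical) */

        iounmap(regs);
        return 0;
}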
@@ -37,8 +37,6 @@
 *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define IO_SPACE_LIMIT 0xffff

#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */


@@ -53,92 +51,6 @@
 */
#define xlate_dev_kmem_ptr(p)   p

/**
 *      virt_to_phys    -       map virtual addresses to physical
 *      @address: address to remap
 *
 *      The returned physical address is the physical (CPU) mapping for
 *      the memory address given. It is only valid to use this function on
 *      addresses directly mapped or allocated via kmalloc.
 *
 *      This function does not give bus mappings for DMA transfers. In
 *      almost all conceivable cases a device driver should not be using
 *      this function
 */

static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa(address);
}

/**
 *      phys_to_virt    -       map physical address to virtual
 *      @address: address to remap
 *
 *      The returned virtual address is a current CPU mapping for
 *      the memory address given. It is only valid to use this function on
 *      addresses that have a kernel mapping
 *
 *      This function does not handle bus mappings for DMA transfers. In
 *      almost all conceivable cases a device driver should not be using
 *      this function
 */

static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{

@@ -136,73 +136,12 @@ __OUTS(b)
__OUTS(w)
__OUTS(l)

#define IO_SPACE_LIMIT 0xffff

#if defined(__KERNEL__) && defined(__x86_64__)

#include <linux/vmalloc.h>

#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}
#endif

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

#include <asm-generic/iomap.h>

/*
 * This one maps high address device memory and turns off caching for that area.
 * it's useful if some control registers are in such an area and write combining
 * or read caching is not desirable:
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);
@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
|
|||
extern int nr_ioapics;
|
||||
extern int nr_ioapic_registers[MAX_IO_APICS];
|
||||
|
||||
/*
|
||||
* MP-BIOS irq configuration table structures:
|
||||
*/
|
||||
|
||||
#define MP_MAX_IOAPIC_PIN 127
|
||||
|
||||
struct mp_config_ioapic {
|
||||
unsigned long mp_apicaddr;
|
||||
unsigned int mp_apicid;
|
||||
unsigned char mp_type;
|
||||
unsigned char mp_apicver;
|
||||
unsigned char mp_flags;
|
||||
};
|
||||
|
||||
struct mp_config_intsrc {
|
||||
unsigned int mp_dstapic;
|
||||
unsigned char mp_type;
|
||||
unsigned char mp_irqtype;
|
||||
unsigned short mp_irqflag;
|
||||
unsigned char mp_srcbus;
|
||||
unsigned char mp_srcbusirq;
|
||||
unsigned char mp_dstirq;
|
||||
};
|
||||
|
||||
/* I/O APIC entries */
|
||||
extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
|
||||
extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
|
||||
|
||||
/* # of MP IRQ source entries */
|
||||
extern int mp_irq_entries;
|
||||
|
||||
/* MP IRQ source entries */
|
||||
extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
|
||||
extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
|
||||
|
||||
/* non-0 if default (table-less) MP configuration */
|
||||
extern int mpc_default_type;
|
||||
|
@ -165,15 +143,6 @@ extern int noioapicreroute;
|
|||
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
|
||||
extern int timer_through_8259;
|
||||
|
||||
static inline void disable_ioapic_setup(void)
|
||||
{
|
||||
#ifdef CONFIG_PCI
|
||||
noioapicquirk = 1;
|
||||
noioapicreroute = -1;
|
||||
#endif
|
||||
skip_ioapic_setup = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we use the IO-APIC for IRQ routing, disable automatic
|
||||
* assignment of PCI IRQ's.
|
||||
|
@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
|
|||
|
||||
extern void probe_nr_irqs_gsi(void);
|
||||
|
||||
extern int setup_ioapic_entry(int apic, int irq,
|
||||
struct IO_APIC_route_entry *entry,
|
||||
unsigned int destination, int trigger,
|
||||
int polarity, int vector);
|
||||
extern void ioapic_write_entry(int apic, int pin,
|
||||
struct IO_APIC_route_entry e);
|
||||
#else /* !CONFIG_X86_IO_APIC */
|
||||
#define io_apic_assign_pci_irqs 0
|
||||
static const int timer_through_8259 = 0;
|
||||
|
|
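The two declarations above compose naturally: setup_ioapic_entry() fills in a route entry and ioapic_write_entry() commits it to a pin. A hedged sketch under invented values (apic 0, pin/irq 9, physical destination 0, edge-triggered, active-high, vector 0x31), assuming a zero return means success:

/* Illustrative only: every numeric argument here is made up. */
static void demo_route_pin(void)
{
	struct IO_APIC_route_entry entry;

	if (setup_ioapic_entry(0, 9, &entry, 0, 0, 0, 0x31) == 0)
		ioapic_write_entry(0, 9, entry);	/* program pin 9 of apic 0 */
}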
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H

#ifdef CONFIG_X86_LOCAL_APIC

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2

@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
		cpu_relax();
}

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
				       unsigned int dest)
static inline void
__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround

@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
					 unsigned int dest)
static inline void
__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
	native_apic_mem_write(APIC_ICR, cfg);
}

static inline void send_IPI_mask_sequence(const struct cpumask *mask,
					  int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
						int vector);
extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						  int vector);
extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						   int vector);
extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						     int vector);

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
				      vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
/* Avoid include hell */
#define NMI_VECTOR 0x02

extern int no_broadcast;

static inline void __default_local_send_IPI_allbutself(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	else
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}

static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
					    int vector)
static inline void __default_local_send_IPI_all(int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		if (query_cpu != this_cpu)
			__send_IPI_dest_field(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
	if (no_broadcast || vector == NMI_VECTOR)
		apic->send_IPI_mask(cpu_online_mask, vector);
	else
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}

#ifdef CONFIG_X86_32
extern void default_send_IPI_mask_logical(const struct cpumask *mask,
					  int vector);
extern void default_send_IPI_allbutself(int vector);
extern void default_send_IPI_all(int vector);
extern void default_send_IPI_self(int vector);
#endif

#endif

#endif /* _ASM_X86_IPI_H */
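All of these helpers ultimately funnel into the apic driver's send_IPI_* operations. As a minimal hedged sketch, kicking a single remote CPU through that interface might look like the following (the vector number is invented):

/* Illustrative only: DEMO_VECTOR is a made-up vector number. */
#define DEMO_VECTOR	0xf0

static void demo_kick_cpu(int cpu)
{
	/* unicast an IPI to one CPU via the current apic driver */
	apic->send_IPI_mask(cpumask_of(cpu), DEMO_VECTOR);
}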
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif

extern unsigned int do_IRQ(struct pt_regs *regs);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);

extern unsigned int do_IRQ(struct pt_regs *regs);

/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
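used_vectors is an ordinary bitmap, so the generic bitops apply to it directly. A hedged sketch of reserving a vector (0xf0 is an invented example; <linux/bitops.h> is assumed):

/* Illustrative only: claim vector 0xf0 so the allocator will skip it. */
static void demo_reserve_vector(void)
{
	if (!test_bit(0xf0, used_vectors))	/* still free? */
		set_bit(0xf0, used_vectors);	/* mark it in use */
}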
@@ -1,5 +1,31 @@
#ifdef CONFIG_X86_32
# include "irq_regs_32.h"
#else
# include "irq_regs_64.h"
#endif
/*
 * Per-cpu current frame pointer - the location of the last exception frame on
 * the stack, stored in the per-cpu area.
 *
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
#ifndef _ASM_X86_IRQ_REGS_H
#define _ASM_X86_IRQ_REGS_H

#include <asm/percpu.h>

#define ARCH_HAS_OWN_IRQ_REGS

DECLARE_PER_CPU(struct pt_regs *, irq_regs);

static inline struct pt_regs *get_irq_regs(void)
{
	return percpu_read(irq_regs);
}

static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs;

	old_regs = get_irq_regs();
	percpu_write(irq_regs, new_regs);

	return old_regs;
}

#endif /* _ASM_X86_IRQ_REGS_32_H */
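get_irq_regs()/set_irq_regs() exist to support the save/install/restore pattern in interrupt entry code, with do_IRQ() as the canonical user. A simplified sketch of that pattern (dispatch details elided):

/* Sketch of the save/install/restore dance around an interrupt. */
unsigned int demo_do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);	/* publish the new frame */

	/* ... dispatch to the handler; it may call get_irq_regs() ... */

	set_irq_regs(old_regs);		/* restore the previous frame on exit */
	return 1;
}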
@@ -1,31 +0,0 @@
/*
 * Per-cpu current frame pointer - the location of the last exception frame on
 * the stack, stored in the per-cpu area.
 *
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
#ifndef _ASM_X86_IRQ_REGS_32_H
#define _ASM_X86_IRQ_REGS_32_H

#include <asm/percpu.h>

#define ARCH_HAS_OWN_IRQ_REGS

DECLARE_PER_CPU(struct pt_regs *, irq_regs);

static inline struct pt_regs *get_irq_regs(void)
{
	return x86_read_percpu(irq_regs);
}

static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs;

	old_regs = get_irq_regs();
	x86_write_percpu(irq_regs, new_regs);

	return old_regs;
}

#endif /* _ASM_X86_IRQ_REGS_32_H */
Some files were not shown because too many files changed in this diff.