Merge commit 'remotes/tip/x86/paravirt' into x86/untangle2

* commit 'remotes/tip/x86/paravirt': (175 commits)
  xen: use direct ops on 64-bit
  xen: make direct versions of irq_enable/disable/save/restore to common code
  xen: setup percpu data pointers
  xen: fix 32-bit build resulting from mmu move
  x86/paravirt: return full 64-bit result
  x86, percpu: fix kexec with vmlinux
  x86/vmi: fix interrupt enable/disable/save/restore calling convention.
  x86/paravirt: don't restore second return reg
  xen: setup percpu data pointers
  x86: split loading percpu segments from loading gdt
  x86: pass in cpu number to switch_to_new_gdt()
  x86: UV fix uv_flush_send_and_wait()
  x86/paravirt: fix missing callee-save call on pud_val
  x86/paravirt: use callee-saved convention for pte_val/make_pte/etc
  x86/paravirt: implement PVOP_CALL macros for callee-save functions
  x86/paravirt: add register-saving thunks to reduce caller register pressure
  x86/paravirt: selectively save/restore regs around pvops calls
  x86: fix paravirt clobber in entry_64.S
  x86/pvops: add a paravirt_ident functions to allow special patching
  xen: move remaining mmu-related stuff into mmu.c
  ...

Conflicts:
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
Jeremy Fitzhardinge 2009-02-11 11:52:22 -08:00
Parents: c47c1b1f3a e4d0407185
Commit: 9049a11de7
181 changed files with 3697 additions and 3503 deletions


@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h:
#define topology_physical_package_id(cpu)
#define topology_core_id(cpu)
#define topology_thread_siblings(cpu)
#define topology_core_siblings(cpu)
#define topology_thread_cpumask(cpu)
#define topology_core_cpumask(cpu)
The type of **_id is int.
The type of siblings is cpumask_t.
The type of siblings is (const) struct cpumask *.
To be consistent on all architectures, include/linux/topology.h
provides default definitions for any of the above macros that are


@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
return 0;
}


@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
/* We are not allocating bad_irq_desc.affinity or .pending_mask */
#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
@ -161,7 +166,7 @@ void __init init_IRQ(void)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
#ifdef CONFIG_SMP
bad_irq_desc.affinity = CPU_MASK_ALL;
cpumask_setall(bad_irq_desc.affinity);
bad_irq_desc.cpu = smp_processor_id();
#endif
init_arch_irq();
@ -191,15 +196,16 @@ void migrate_irqs(void)
struct irq_desc *desc = irq_desc + i;
if (desc->cpu == cpu) {
unsigned int newcpu = any_online_cpu(desc->affinity);
if (newcpu == NR_CPUS) {
unsigned int newcpu = cpumask_any_and(desc->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
i, cpu);
cpus_setall(desc->affinity);
newcpu = any_online_cpu(desc->affinity);
cpumask_setall(desc->affinity);
newcpu = cpumask_any_and(desc->affinity,
cpu_online_mask);
}
route_irq(desc, i, newcpu);


@ -65,6 +65,7 @@ SECTIONS
#endif
. = ALIGN(4096);
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;


@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
const struct cpumask *mask = cpumask_of(cpu);
spin_lock_irq(&desc->lock);
desc->affinity = *mask;
cpumask_copy(desc->affinity, mask);
desc->chip->set_affinity(irq, mask);
spin_unlock_irq(&desc->lock);
}


@ -70,6 +70,11 @@ static struct irq_desc bad_irq_desc = {
#endif
};
#ifdef CONFIG_CPUMASK_OFFSTACK
/* We are not allocating a variable-sized bad_irq_desc.affinity */
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;


@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
.child = NULL, \
.groups = NULL, \
.min_interval = 8, \
.max_interval = 8*(min(num_online_cpus(), 32)), \
.max_interval = 8*(min(num_online_cpus(), 32U)), \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 2, \


@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
cpus_setall(idesc->affinity);
cpumask_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;


@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
cpumask_copy(&irq_desc[irq].affinity,
cpumask_copy(irq_desc[irq].affinity,
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
@ -148,7 +148,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
>= nr_cpu_ids) {
/*
* Save it for phase 2 processing


@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
struct irq_desc *desc = irq_to_desc(vector);
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
kstat_this_cpu.irqs[vector]++;
kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
kstat_this_cpu.irqs[vector]++;
kstat_incr_irqs_this_cpu(vector, desc);
else {
int irq = local_vector_to_irq(vector);
@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
* Perform normal interrupt style processing
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
struct irq_desc *desc = irq_to_desc(vector);
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
kstat_this_cpu.irqs[vector]++;
kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
kstat_this_cpu.irqs[vector]++;
kstat_incr_irqs_this_cpu(vector, desc);
else {
struct pt_regs *old_regs = set_irq_regs(NULL);
int irq = local_vector_to_irq(vector);


@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
msg.data = data;
write_msi_msg(irq, &msg);
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
irq_desc[irq].affinity = *mask;
cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */


@ -219,6 +219,7 @@ SECTIONS
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;


@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
irq_desc[irq].affinity = *cpu_mask;
cpumask_copy(irq_desc[irq].affinity, cpu_mask);
}
#endif /* CONFIG_SMP */


@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
*/
#define IRQ_AFFINITY_HOOK(irq) \
do { \
if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
smtc_forward_irq(irq); \
irq_exit(); \
return; \


@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
}
irq_desc[irq].affinity = *cpumask;
cpumask_copy(irq_desc[irq].affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
}


@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
* and efficiency, we just pick the easiest one to find.
*/
target = first_cpu(irq_desc[irq].affinity);
target = cpumask_first(irq_desc[irq].affinity);
/*
* We depend on the platform code to have correctly processed
@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
int irq = MIPS_CPU_IRQ_BASE + 1;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
irq_exit();


@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t tmask = *affinity;
cpumask_t tmask;
int cpu = 0;
void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
* be made to forward to an offline "CPU".
*/
cpumask_copy(&tmask, affinity);
for_each_cpu(cpu, affinity) {
if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
cpu_clear(cpu, tmask);
}
irq_desc[irq].affinity = tmask;
cpumask_copy(irq_desc[irq].affinity, &tmask);
if (cpus_empty(tmask))
/*


@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
int irq = SGI_BUSERR_IRQ;
irq_enter();
kstat_this_cpu.irqs[irq]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
ip22_be_interrupt(irq);
irq_exit();
}


@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
char c;
irq_enter();
kstat_this_cpu.irqs[irq]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode();


@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
void bcm1480_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
int irq = K_BCM1480_INT_MBOX_0_0;
unsigned int action;
kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
/* Load the mailbox register to figure out what we're supposed to do */
action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;


@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
void sb1250_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
int irq = K_INT_MBOX_0;
unsigned int action;
kstat_this_cpu.irqs[K_INT_MBOX_0]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
/* Load the mailbox register to figure out what we're supposed to do */
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;


@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
int sum, cpu = smp_processor_id();
int irq = NMIIRQ;
u8 wdt, tmp;
wdt = WDCTR & ~WDCTR_WDCNE;
@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
NMICR = NMICR_WDIF;
nmi_count(cpu)++;
kstat_this_cpu.irqs[NMIIRQ]++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
sum = irq_stat[cpu].__irq_count;
if (last_irq_sums[cpu] == sum) {


@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
if (CHECK_IRQ_PER_CPU(irq)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
irq_desc[irq].affinity = CPU_MASK_ALL;
cpumask_setall(irq_desc[irq].affinity);
return -EINVAL;
}
@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
if (cpu_check_affinity(irq, dest))
return;
irq_desc[irq].affinity = *dest;
cpumask_copy(irq_desc[irq].affinity, dest);
}
#endif
@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
dest = irq_desc[irq].affinity;
cpumask_copy(&dest, irq_desc[irq].affinity);
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);


@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
if (irq_desc[irq].status & IRQ_PER_CPU)
continue;
cpus_and(mask, irq_desc[irq].affinity, map);
cpumask_and(&mask, irq_desc[irq].affinity, &map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;


@ -184,6 +184,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
*(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;


@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
int server;
/* For the moment only implement delivery to all cpus or one cpu */
cpumask_t cpumask = irq_desc[virq].affinity;
cpumask_t cpumask;
cpumask_t tmp = CPU_MASK_NONE;
cpumask_copy(&cpumask, irq_desc[virq].affinity);
if (!distribute_irqs)
return default_server;
@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
irq_desc[virq].affinity = CPU_MASK_ALL;
cpumask_setall(irq_desc[virq].affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);


@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
cpumask_t mask = irq_desc[virt_irq].affinity;
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
if (cpus_equal(mask, CPU_MASK_ALL)) {
static int irq_rover;
static DEFINE_SPINLOCK(irq_rover_lock);


@ -252,9 +252,10 @@ struct irq_handler_data {
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
cpumask_t mask = irq_desc[virt_irq].affinity;
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
if (cpus_equal(mask, CPU_MASK_ALL)) {
static int irq_rover;
static DEFINE_SPINLOCK(irq_rover_lock);
@ -796,7 +797,7 @@ void fixup_irqs(void)
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
&irq_desc[irq].affinity);
irq_desc[irq].affinity);
}
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}


@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
irq_enter();
kstat_this_cpu.irqs[0]++;
kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
if (unlikely(!evt->event_handler)) {
printk(KERN_WARNING


@ -133,7 +133,7 @@ config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
def_bool y
config HAVE_CPUMASK_OF_CPU_MAP
def_bool X86_64_SMP
@ -391,6 +391,13 @@ config X86_RDC321X
as R-8610-(G).
If you don't have one of these chips, you should say N here.
config X86_UV
bool "SGI Ultraviolet"
depends on X86_64
help
This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here.
config SCHED_OMIT_FRAME_POINTER
def_bool y
prompt "Single-depth WCHAN output"
@ -1340,13 +1347,17 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config CC_STACKPROTECTOR_ALL
bool
config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
depends on X86_64 && EXPERIMENTAL && BROKEN
depends on X86_64
select CC_STACKPROTECTOR_ALL
help
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of critical functions, a canary
value on the stack just before the return address, and validates
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
the stack just before the return address, and validates
the value just before actually returning. Stack based buffer
overflows (that need to overwrite this return address) now also
overwrite the canary, which gets detected and the attack is then
@ -1354,15 +1365,8 @@ config CC_STACKPROTECTOR
This feature requires gcc version 4.2 or above, or a distribution
gcc with the feature backported. Older versions are automatically
detected and for those versions, this configuration option is ignored.
config CC_STACKPROTECTOR_ALL
bool "Use stack-protector for all functions"
depends on CC_STACKPROTECTOR
help
Normally, GCC only inserts the canary value protection for
functions that use large-ish on-stack buffers. By enabling
this option, GCC will be asked to do this for ALL functions.
detected and for those versions, this configuration option is
ignored. (and a warning is printed during bootup)
source kernel/Kconfig.hz


@ -292,25 +292,23 @@ config X86_CPU
# Define implied options from the CPU selection here
config X86_L1_CACHE_BYTES
int
default "128" if GENERIC_CPU || MPSC
default "64" if MK8 || MCORE2
depends on X86_64
default "128" if MPSC
default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
config X86_INTERNODE_CACHE_BYTES
int
default "4096" if X86_VSMP
default X86_L1_CACHE_BYTES if !X86_VSMP
depends on X86_64
config X86_CMPXCHG
def_bool X86_64 || (X86_32 && !M386)
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
default "7" if MPENTIUM4 || MPSC
default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
config X86_XADD
def_bool y


@ -117,6 +117,7 @@ config DEBUG_RODATA
config DEBUG_RODATA_TEST
bool "Testcase for the DEBUG_RODATA feature"
depends on DEBUG_RODATA
default y
help
This option enables a testcase for the DEBUG_RODATA
feature as well as for the change_page_attr() infrastructure.


@ -73,7 +73,7 @@ else
stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
"$(CC)" -fstack-protector )
"$(CC)" "-fstack-protector -DGCC_HAS_SP" )
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
"$(CC)" -fstack-protector-all )


@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
movq %gs:pda_kernelstack, %rsp
addq $(PDA_STACKOFFSET),%rsp
movq PER_CPU_VAR(kernel_stack), %rsp
addq $(KERNEL_STACK_OFFSET),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs, here we enable it straight after entry:
@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq %gs:pda_kernelstack,%rsp
movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:


@ -0,0 +1,12 @@
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H
/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
#endif /* _ASM_X86_APICNUM_H */


@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
#ifdef CONFIG_SMP
extern void prefill_possible_map(void);
#else /* CONFIG_SMP */
static inline void prefill_possible_map(void) {}
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif /* CONFIG_SMP */
struct x86_cpu {
struct cpu cpu;
};
@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
#endif
DECLARE_PER_CPU(int, cpu_state);
#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
extern unsigned char boot_cpu_id;
#else
#define boot_cpu_id 0
#endif
#endif /* _ASM_X86_CPU_H */


@ -0,0 +1,32 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#ifdef CONFIG_X86_64
extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;
extern void setup_cpu_local_masks(void);
#else /* CONFIG_X86_32 */
extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;
#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
static inline void setup_cpu_local_masks(void) { }
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */


@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>
#ifndef __ASSEMBLY__
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}
#else /* X86_32 */
#ifndef __ASSEMBLY__
#include <asm/pda.h>
struct task_struct;
static __always_inline struct task_struct *get_current(void)
{
return read_pda(pcurrent);
return percpu_read(current_task);
}
#else /* __ASSEMBLY__ */
#include <asm/asm-offsets.h>
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
#endif /* __ASSEMBLY__ */
#endif /* X86_32 */
#define current get_current()
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CURRENT_H */


@ -138,11 +138,4 @@ struct genapic {
extern struct genapic *genapic;
extern void es7000_update_genapic_to_cluster(void);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type() UV_NONE
#define is_uv_system() 0
#define uv_wakeup_secondary(a, b) 1
#define uv_system_init() do {} while (0)
#endif /* _ASM_X86_GENAPIC_32_H */


@ -51,15 +51,9 @@ extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
extern void setup_apic_routing(void);


@ -1,11 +1,52 @@
#ifdef CONFIG_X86_32
# include "hardirq_32.h"
#else
# include "hardirq_64.h"
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */
unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
#endif
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT
#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
#endif /* _ASM_X86_HARDIRQ_H */


@ -1,30 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_32_H
#define _ASM_X86_HARDIRQ_32_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned long idle_timestamp;
unsigned int __nmi_count; /* arch dependent */
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq0_irqs;
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
unsigned int irq_thermal_count;
unsigned int irq_spurious_count;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h>
#endif /* _ASM_X86_HARDIRQ_32_H */


@ -1,25 +0,0 @@
#ifndef _ASM_X86_HARDIRQ_64_H
#define _ASM_X86_HARDIRQ_64_H
#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT 1
#define inc_irq_stat(member) add_pda(member, 1)
#define local_softirq_pending() read_pda(__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING 1
#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
#endif /* _ASM_X86_HARDIRQ_64_H */


@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
/*
* MP-BIOS irq configuration table structures:
*/
#define MP_MAX_IOAPIC_PIN 127
struct mp_config_ioapic {
unsigned long mp_apicaddr;
unsigned int mp_apicid;
unsigned char mp_type;
unsigned char mp_apicver;
unsigned char mp_flags;
};
struct mp_config_intsrc {
unsigned int mp_dstapic;
unsigned char mp_type;
unsigned char mp_irqtype;
unsigned short mp_irqflag;
unsigned char mp_srcbus;
unsigned char mp_srcbusirq;
unsigned char mp_dstirq;
};
/* I/O APIC entries */
extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;


@ -1,5 +1,31 @@
#ifdef CONFIG_X86_32
# include "irq_regs_32.h"
#else
# include "irq_regs_64.h"
#endif
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack, stored in the per-cpu area.
*
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
#ifndef _ASM_X86_IRQ_REGS_H
#define _ASM_X86_IRQ_REGS_H
#include <asm/percpu.h>
#define ARCH_HAS_OWN_IRQ_REGS
DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return percpu_read(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = get_irq_regs();
percpu_write(irq_regs, new_regs);
return old_regs;
}
#endif /* _ASM_X86_IRQ_REGS_32_H */


@ -1,31 +0,0 @@
/*
* Per-cpu current frame pointer - the location of the last exception frame on
* the stack, stored in the per-cpu area.
*
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
#ifndef _ASM_X86_IRQ_REGS_32_H
#define _ASM_X86_IRQ_REGS_32_H
#include <asm/percpu.h>
#define ARCH_HAS_OWN_IRQ_REGS
DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
return x86_read_percpu(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = get_irq_regs();
x86_write_percpu(irq_regs, new_regs);
return old_regs;
}
#endif /* _ASM_X86_IRQ_REGS_32_H */


@ -1 +0,0 @@
#include <asm-generic/irq_regs.h>


@ -49,31 +49,33 @@
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
*
* Vectors 0xf0-0xfa are free (reserved for future Linux use).
*/
#ifdef CONFIG_X86_32
# define SPURIOUS_APIC_VECTOR 0xff
# define ERROR_APIC_VECTOR 0xfe
# define INVALIDATE_TLB_VECTOR 0xfd
# define RESCHEDULE_VECTOR 0xfc
# define CALL_FUNCTION_VECTOR 0xfb
# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
# define THERMAL_APIC_VECTOR 0xf0
# define RESCHEDULE_VECTOR 0xfd
# define CALL_FUNCTION_VECTOR 0xfc
# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
# define THERMAL_APIC_VECTOR 0xfa
/* 0xf8 - 0xf9 : free */
# define INVALIDATE_TLB_VECTOR_END 0xf7
# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
# define NUM_INVALIDATE_TLB_VECTORS 8
#else
#define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
#define UV_BAU_MESSAGE 0xf8
#define INVALIDATE_TLB_VECTOR_END 0xf7
#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
# define SPURIOUS_APIC_VECTOR 0xff
# define ERROR_APIC_VECTOR 0xfe
# define RESCHEDULE_VECTOR 0xfd
# define CALL_FUNCTION_VECTOR 0xfc
# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
# define THERMAL_APIC_VECTOR 0xfa
# define THRESHOLD_APIC_VECTOR 0xf9
# define UV_BAU_MESSAGE 0xf8
# define INVALIDATE_TLB_VECTOR_END 0xf7
# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
@ -105,6 +107,8 @@
#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
#include <asm/apicnum.h> /* need MAX_IO_APICS */
#ifndef CONFIG_SPARSE_IRQ
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
@ -112,11 +116,12 @@
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
#else
# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
# else
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
# define NR_IRQS \
((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \
(NR_VECTORS + (8 * NR_CPUS)) : \
(NR_VECTORS + (32 * MAX_IO_APICS))) \
#endif
#elif defined(CONFIG_X86_VOYAGER)


@ -11,10 +11,26 @@
*/
#ifdef CONFIG_X86_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
smp_invalidate_interrupt)
BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
smp_invalidate_interrupt)
#endif
/*


@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_X86_32
# include "mmu_context_32.h"
#else
# include "mmu_context_64.h"
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
/* Re-load page tables */
load_cr3(next->pgd);
/*
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define activate_mm(prev, next) \
do { \
@ -33,5 +76,17 @@ do { \
switch_mm((prev), (next), NULL); \
} while (0);
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm) \
do { \
loadsegment(gs, 0); \
} while (0)
#else
#define deactivate_mm(tsk, mm) \
do { \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
#endif
#endif /* _ASM_X86_MMU_CONTEXT_H */


@ -1,55 +0,0 @@
#ifndef _ASM_X86_MMU_CONTEXT_32_H
#define _ASM_X86_MMU_CONTEXT_32_H
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
x86_write_percpu(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
/* Re-load page tables */
load_cr3(next->pgd);
/*
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define deactivate_mm(tsk, mm) \
asm("movl %0,%%gs": :"r" (0));
#endif /* _ASM_X86_MMU_CONTEXT_32_H */


@ -1,54 +0,0 @@
#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H
#include <asm/pda.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
write_pda(mmu_state, TLBSTATE_OK);
write_pda(active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
if (unlikely(next->context.ldt != prev->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
write_pda(mmu_state, TLBSTATE_OK);
if (read_pda(active_mm) != next)
BUG();
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
}
}
#endif
}
#define deactivate_mm(tsk, mm) \
do { \
load_gs_index(0); \
asm volatile("movl %0,%%fs"::"r"(0)); \
} while (0)
#endif /* _ASM_X86_MMU_CONTEXT_64_H */


@ -24,17 +24,18 @@
# endif
#endif
struct intel_mp_floating {
char mpf_signature[4]; /* "_MP_" */
unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
unsigned char mpf_specification;/* Specification version */
unsigned char mpf_checksum; /* Checksum (makes sum 0) */
unsigned char mpf_feature1; /* Standard or configuration ? */
unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
unsigned char mpf_feature3; /* Unused (0) */
unsigned char mpf_feature4; /* Unused (0) */
unsigned char mpf_feature5; /* Unused (0) */
/* Intel MP Floating Pointer Structure */
struct mpf_intel {
char signature[4]; /* "_MP_" */
unsigned int physptr; /* Configuration table address */
unsigned char length; /* Our length (paragraphs) */
unsigned char specification; /* Specification version */
unsigned char checksum; /* Checksum (makes sum 0) */
unsigned char feature1; /* Standard or configuration ? */
unsigned char feature2; /* Bit7 set for IMCR|PIC */
unsigned char feature3; /* Unused (0) */
unsigned char feature4; /* Unused (0) */
unsigned char feature5; /* Unused (0) */
};
#define MPC_SIGNATURE "PCMP"


@ -163,7 +163,7 @@ static inline pteval_t native_pte_val(pte_t pte)
return pte.pte;
}
static inline pteval_t native_pte_flags(pte_t pte)
static inline pteval_t pte_flags(pte_t pte)
{
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
@ -189,7 +189,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
#endif
#define pte_val(x) native_pte_val(x)
#define pte_flags(x) native_pte_flags(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */


@ -13,8 +13,8 @@
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2


@ -12,21 +12,38 @@
#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
#define CLBR_EDI (1 << 3)
#ifdef CONFIG_X86_64
#define CLBR_RSI (1 << 3)
#define CLBR_RDI (1 << 4)
#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY ((1 << 4) - 1)
#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH (0)
#else
#define CLBR_RAX CLBR_EAX
#define CLBR_RCX CLBR_ECX
#define CLBR_RDX CLBR_EDX
#define CLBR_RDI CLBR_EDI
#define CLBR_RSI (1 << 4)
#define CLBR_R8 (1 << 5)
#define CLBR_R9 (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)
#define CLBR_ANY ((1 << 9) - 1)
#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG (CLBR_RAX)
#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY ((1 << 3) - 1)
#endif /* X86_64 */
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
@ -40,6 +57,14 @@ struct tss_struct;
struct mm_struct;
struct desc_struct;
/*
* Wrapper type for pointers to code which uses the non-standard
* calling convention. See PV_CALL_SAVE_REGS_THUNK below.
*/
struct paravirt_callee_save {
void *func;
};
/* general info */
struct pv_info {
unsigned int kernel_rpl;
@ -189,11 +214,15 @@ struct pv_irq_ops {
* expected to use X86_EFLAGS_IF; all other bits
* returned from save_fl are undefined, and may be ignored by
* restore_fl.
*
* NOTE: These functions callers expect the callee to preserve
* more registers than the standard C calling convention.
*/
unsigned long (*save_fl)(void);
void (*restore_fl)(unsigned long);
void (*irq_disable)(void);
void (*irq_enable)(void);
struct paravirt_callee_save save_fl;
struct paravirt_callee_save restore_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
void (*safe_halt)(void);
void (*halt)(void);
@ -244,7 +273,8 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
void (*flush_tlb_others)(const struct cpumask *cpus,
struct mm_struct *mm,
unsigned long va);
/* Hooks for allocating and freeing a pagetable top-level */
@ -278,12 +308,11 @@ struct pv_mmu_ops {
void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pteval_t (*pte_val)(pte_t);
pteval_t (*pte_flags)(pte_t);
pte_t (*make_pte)(pteval_t pte);
struct paravirt_callee_save pte_val;
struct paravirt_callee_save make_pte;
pgdval_t (*pgd_val)(pgd_t);
pgd_t (*make_pgd)(pgdval_t pgd);
struct paravirt_callee_save pgd_val;
struct paravirt_callee_save make_pgd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
@ -298,12 +327,12 @@ struct pv_mmu_ops {
void (*set_pud)(pud_t *pudp, pud_t pudval);
pmdval_t (*pmd_val)(pmd_t);
pmd_t (*make_pmd)(pmdval_t pmd);
struct paravirt_callee_save pmd_val;
struct paravirt_callee_save make_pmd;
#if PAGETABLE_LEVELS == 4
pudval_t (*pud_val)(pud_t);
pud_t (*make_pud)(pudval_t pud);
struct paravirt_callee_save pud_val;
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
* makes sure the incoming and outgoing types are always correct.
*/
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
#define PVOP_VCALL_ARGS \
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
#else /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
"=S" (__esi), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
#endif
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op) ((void)op)
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...) \
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
pre, post, ...) \
({ \
rettype __ret; \
PVOP_CALL_ARGS; \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
/* This is 32-bit specific, but is okay in 64-bit */ \
/* since this condition will never hold */ \
@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_CALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" EXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_CALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" EXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
__ret = (rettype)__eax; \
} \
__ret; \
})
#define __PVOP_VCALL(op, pre, post, ...) \
#define __PVOP_CALL(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
PVOP_CALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
({ \
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: PVOP_VCALL_CLOBBERS \
: call_clbr \
: paravirt_type(op), \
paravirt_clobber(CLBR_ANY), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" VEXTRA_CLOBBERS); \
: "memory", "cc" extra_clbr); \
})
#define __PVOP_VCALL(op, pre, post, ...) \
____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
VEXTRA_CLOBBERS, \
pre, post, ##__VA_ARGS__)
#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
PVOP_VCALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op) \
__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
__PVOP_VCALL(op, "", "")
#define PVOP_CALLEE0(rettype, op) \
__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op) \
__PVOP_VCALLEESAVE(op, "", "")
#define PVOP_CALL1(rettype, op, arg1) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_CALLEE1(rettype, op, arg1) \
__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1) \
__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_CALL2(rettype, op, arg1, arg2) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1" ((unsigned long)(arg2)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1" ((unsigned long)(arg2)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2) \
__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2))
#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
"0" ((u32)(arg1)), "1" ((u32)(arg2)), \
"2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
"2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
"3"((unsigned long)(arg4)))
__PVOP_CALL(rettype, op, "", "", \
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
"1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
"3"((unsigned long)(arg4)))
__PVOP_VCALL(op, "", "", \
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
static inline int paravirt_enabled(void)
@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}
static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
static inline void flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va)
{
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t,
pv_mmu_ops.make_pte,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pteval_t,
pv_mmu_ops.make_pte,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pteval_t,
pv_mmu_ops.make_pte,
val);
ret = PVOP_CALLEE1(pteval_t,
pv_mmu_ops.make_pte,
val);
return (pte_t) { .pte = ret };
}
@ -1075,42 +1171,25 @@ static inline pteval_t pte_val(pte_t pte)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
pte.pte, (u64)pte.pte >> 32);
ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
pte.pte, (u64)pte.pte >> 32);
else
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
pte.pte);
ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
pte.pte);
return ret;
}
static inline pteval_t pte_flags(pte_t pte)
{
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
pte.pte, (u64)pte.pte >> 32);
else
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
pte.pte);
#ifdef CONFIG_PARAVIRT_DEBUG
BUG_ON(ret & PTE_PFN_MASK);
#endif
return ret;
}
static inline pgd_t __pgd(pgdval_t val)
{
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
val);
ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
val);
return (pgd_t) { ret };
}
@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
else
ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd);
ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
pgd.pgd);
return ret;
}
@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
val);
ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
val);
return (pmd_t) { ret };
}
@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
else
ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd);
ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
pmd.pmd);
return ret;
}
@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
val, (u64)val >> 32);
ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
val, (u64)val >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
val);
ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
val);
return (pud_t) { ret };
}
@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
pud.pud, (u64)pud.pud >> 32);
ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
pud.pud, (u64)pud.pud >> 32);
else
ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
pud.pud);
ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
pud.pud);
return ret;
}
@ -1387,6 +1466,9 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
}
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);
#define paravirt_nop ((void *)_paravirt_nop)
void paravirt_use_bytelocks(void);
@ -1438,12 +1520,37 @@ extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS \
"push %rcx;" \
"push %rdx;" \
"push %rsi;" \
"push %rdi;" \
"push %r8;" \
"push %r9;" \
"push %r10;" \
"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS \
"pop %r11;" \
"pop %r10;" \
"pop %r9;" \
"pop %r8;" \
"pop %rdi;" \
"pop %rsi;" \
"pop %rdx;" \
"pop %rcx;"
/* We save some registers, but all of them, that's too much. We clobber all
* caller saved registers but the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
@ -1453,52 +1560,76 @@ extern struct paravirt_patch_site __parainstructions[],
#define PV_FLAGS_ARG "D"
#endif
/*
* Generate a thunk around a function which saves all caller-save
* registers except for the return value. This allows C functions to
* be called from assembler code where fewer than normal registers are
* available. It may also help code generation around calls from C
* code if the common case doesn't use many registers.
*
* When a callee is wrapped in a thunk, the caller can assume that all
* arg regs and all scratch registers are preserved across the
* call. The return value in rax/eax will not be saved, even for void
* functions.
*/
#define PV_CALLEE_SAVE_REGS_THUNK(func) \
extern typeof(func) __raw_callee_save_##func; \
static void *__##func##__ __used = func; \
\
asm(".pushsection .text;" \
"__raw_callee_save_" #func ": " \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";" \
PV_RESTORE_ALL_CALLER_REGS \
"ret;" \
".popsection")
/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { __raw_callee_save_##func })
/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_VEXTRA_CLOBBERS);
: "memory", "cc");
return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc" PV_EXTRA_CLOBBERS);
: "memory", "cc");
}
static inline void raw_local_irq_disable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
: "memory", "eax", "cc");
}
static inline void raw_local_irq_enable(void)
{
asm volatile(paravirt_alt(PV_SAVE_REGS
PARAVIRT_CALL
PV_RESTORE_REGS)
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
: "memory", "eax", "cc");
}
static inline unsigned long __raw_local_irq_save(void)
@ -1541,33 +1672,49 @@ static inline unsigned long __raw_local_irq_save(void)
.popsection
#define COND_PUSH(set, mask, reg) \
.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg) \
.if ((~(set)) & mask); pop %reg; .endif
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS \
push %rax; \
push %rcx; \
push %rdx; \
push %rsi; \
push %rdi; \
push %r8; \
push %r9; \
push %r10; \
push %r11
#define PV_RESTORE_REGS \
pop %r11; \
pop %r10; \
pop %r9; \
pop %r8; \
pop %rdi; \
pop %rsi; \
pop %rdx; \
pop %rcx; \
pop %rax
#define PV_SAVE_REGS(set) \
COND_PUSH(set, CLBR_RAX, rax); \
COND_PUSH(set, CLBR_RCX, rcx); \
COND_PUSH(set, CLBR_RDX, rdx); \
COND_PUSH(set, CLBR_RSI, rsi); \
COND_PUSH(set, CLBR_RDI, rdi); \
COND_PUSH(set, CLBR_R8, r8); \
COND_PUSH(set, CLBR_R9, r9); \
COND_PUSH(set, CLBR_R10, r10); \
COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set) \
COND_POP(set, CLBR_R11, r11); \
COND_POP(set, CLBR_R10, r10); \
COND_POP(set, CLBR_R9, r9); \
COND_POP(set, CLBR_R8, r8); \
COND_POP(set, CLBR_RDI, rdi); \
COND_POP(set, CLBR_RSI, rsi); \
COND_POP(set, CLBR_RDX, rdx); \
COND_POP(set, CLBR_RCX, rcx); \
COND_POP(set, CLBR_RAX, rax)
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
#else
#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PV_SAVE_REGS(set) \
COND_PUSH(set, CLBR_EAX, eax); \
COND_PUSH(set, CLBR_EDI, edi); \
COND_PUSH(set, CLBR_ECX, ecx); \
COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set) \
COND_POP(set, CLBR_EDX, edx); \
COND_POP(set, CLBR_ECX, ecx); \
COND_POP(set, CLBR_EDI, edi); \
COND_POP(set, CLBR_EAX, eax)
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr) *%cs:addr
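
A hedged reading of what the conditional forms above buy: COND_PUSH emits a push only for registers whose bit is clear in the clobber set, so the more registers a site declares clobberable, the fewer save/restore instructions get emitted. For example (64-bit, with the CLBR_* bit assignments taken on trust from the rest of this series):

/*
 * PV_SAVE_REGS(CLBR_RAX)    ->  push %rcx; push %rdx; push %rsi; push %rdi;
 *                               push %r8;  push %r9;  push %r10; push %r11
 * PV_RESTORE_REGS(CLBR_RAX)     pops the same registers in reverse order;
 *                               %rax is skipped because the site declared it
 *                               clobberable.
 */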
@ -1579,15 +1726,15 @@ static inline unsigned long __raw_local_irq_save(void)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
PV_SAVE_REGS; \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
PV_RESTORE_REGS;) \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
PV_SAVE_REGS; \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS;)
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define USERGS_SYSRET32 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@ -1617,11 +1764,15 @@ static inline unsigned long __raw_local_irq_save(void)
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
swapgs)
/*
* Note: swapgs is very special, and in practice is either going to be

* implemented with a single "swapgs" instruction or something very
* special. Either way, we don't need to save any registers for
* it.
*/
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
PV_SAVE_REGS; \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
PV_RESTORE_REGS \
call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
)
#define GET_CR2_INTO_RCX \


@ -1,137 +0,0 @@
#ifndef _ASM_X86_PDA_H
#define _ASM_X86_PDA_H
#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
struct task_struct *pcurrent; /* 0 Current process */
unsigned long data_offset; /* 8 Per cpu data offset from linker
address */
unsigned long kernelstack; /* 16 top of kernel stack for current */
unsigned long oldrsp; /* 24 user rsp for system call */
int irqcount; /* 32 Irq nesting counter. Starts -1 */
unsigned int cpunumber; /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long stack_canary; /* 40 stack canary value */
/* gcc-ABI: this canary MUST be at
offset 40!!! */
#endif
char *irqstackptr;
short nodenumber; /* number of current node (32k max) */
short in_bootmem; /* pda lives in bootmem */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
short mmu_state;
short isidle;
struct mm_struct *active_mm;
unsigned apic_timer_irqs;
unsigned irq0_irqs;
unsigned irq_resched_count;
unsigned irq_call_count;
unsigned irq_tlb_count;
unsigned irq_thermal_count;
unsigned irq_threshold_count;
unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
extern struct x8664_pda **_cpu_pda;
extern void pda_init(int);
#define cpu_pda(i) (_cpu_pda[i])
/*
* There is no fast way to get the base address of the PDA, all the accesses
* have to mention %fs/%gs. So it needs to be done this Torvaldian way.
*/
extern void __bad_pda_field(void) __attribute__((noreturn));
/*
* proxy_pda doesn't actually exist, but tell gcc it is accessed for
* all PDA accesses so it gets read/write dependencies right.
*/
extern struct x8664_pda _proxy_pda;
#define pda_offset(field) offsetof(struct x8664_pda, field)
#define pda_to_op(op, field, val) \
do { \
typedef typeof(_proxy_pda.field) T__; \
if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
asm(op "w %1,%%gs:%c2" : \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
case 4: \
asm(op "l %1,%%gs:%c2" : \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i" (pda_offset(field))); \
break; \
case 8: \
asm(op "q %1,%%gs:%c2": \
"+m" (_proxy_pda.field) : \
"ri" ((T__)val), \
"i"(pda_offset(field))); \
break; \
default: \
__bad_pda_field(); \
} \
} while (0)
#define pda_from_op(op, field) \
({ \
typeof(_proxy_pda.field) ret__; \
switch (sizeof(_proxy_pda.field)) { \
case 2: \
asm(op "w %%gs:%c1,%0" : \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 4: \
asm(op "l %%gs:%c1,%0": \
"=r" (ret__): \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
case 8: \
asm(op "q %%gs:%c1,%0": \
"=r" (ret__) : \
"i" (pda_offset(field)), \
"m" (_proxy_pda.field)); \
break; \
default: \
__bad_pda_field(); \
} \
ret__; \
})
#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field) \
({ \
int old__; \
asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (_proxy_pda.field) \
: "dIr" (bit), "i" (pda_offset(field)) : "memory");\
old__; \
})
#endif
#define PDA_STACKOFFSET (5*8)
#endif /* _ASM_X86_PDA_H */


@ -2,53 +2,12 @@
#define _ASM_X86_PERCPU_H
#ifdef CONFIG_X86_64
#include <linux/compiler.h>
/* Same as asm-generic/percpu.h, except that we store the per cpu offset
in the PDA. Longer term the PDA and every per cpu variable
should be just put into a single section and referenced directly
from %gs */
#ifdef CONFIG_SMP
#include <asm/pda.h>
#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset read_pda(data_offset)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __percpu_seg gs
#define __percpu_mov_op movq
#else
#define __percpu_seg fs
#define __percpu_mov_op movl
#endif
#include <asm-generic/percpu.h>
DECLARE_PER_CPU(struct x8664_pda, pda);
/*
* These are supposed to be implemented as a single instruction which
* operates on the per-cpu data base segment. x86-64 doesn't have
* that yet, so this is a fairly inefficient workaround for the
* meantime. The single instruction is atomic with respect to
* preemption and interrupts, so we need to explicitly disable
* interrupts here to achieve the same effect. However, because it
* can be used from within interrupt-disable/enable, we can't actually
* disable interrupts; disabling preemption is enough.
*/
#define x86_read_percpu(var) \
({ \
typeof(per_cpu_var(var)) __tmp; \
preempt_disable(); \
__tmp = __get_cpu_var(var); \
preempt_enable(); \
__tmp; \
})
#define x86_write_percpu(var, val) \
do { \
preempt_disable(); \
__get_cpu_var(var) = (val); \
preempt_enable(); \
} while(0)
#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
@ -65,47 +24,26 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
movl %fs:per_cpu__##this_cpu_off, reg; \
#define PER_CPU(var, reg) \
__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var) %fs:per_cpu__##var
#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg) \
movl $per_cpu__##var, reg
#define PER_CPU(var, reg) \
__percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
#else /* ...!ASSEMBLY */
/*
* PER_CPU finds an address of a per-cpu variable.
*
* Args:
* var - variable name
* cpu - 32bit register containing the current CPU number
*
* The resulting address is stored in the "cpu" argument.
*
* Example:
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#include <linux/stringify.h>
#ifdef CONFIG_SMP
#define __my_cpu_offset x86_read_percpu(this_cpu_off)
/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
#define __percpu_seg "%%fs:"
#else /* !SMP */
#define __percpu_seg ""
#endif /* SMP */
#include <asm-generic/percpu.h>
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset percpu_read(this_cpu_off)
#else
#define __percpu_arg(x) "%" #x
#endif
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
@ -120,20 +58,25 @@ do { \
} \
switch (sizeof(var)) { \
case 1: \
asm(op "b %1,"__percpu_seg"%0" \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 2: \
asm(op "w %1,"__percpu_seg"%0" \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 4: \
asm(op "l %1,"__percpu_seg"%0" \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
: "re" ((T__)val)); \
break; \
default: __bad_percpu_size(); \
} \
} while (0)
@ -143,17 +86,22 @@ do { \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_seg"%1,%0" \
asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 2: \
asm(op "w "__percpu_seg"%1,%0" \
asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
asm(op "l "__percpu_seg"%1,%0" \
asm(op "l "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
@ -162,13 +110,30 @@ do { \
ret__; \
})
#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
int old__; \
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (per_cpu__##var) \
: "dIr" (bit)); \
old__; \
})
#include <asm-generic/percpu.h>
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_X86_64 */
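
For orientation, a small sketch of the new accessors; cpu_number is defined elsewhere in this series, and the generated-code comments assume the SMP %gs segment on 64-bit:

/* Each accessor compiles to a single segment-relative instruction. */
static inline void example_set_cpu_number(int cpu)
{
	percpu_write(cpu_number, cpu);	/* movl ..., %gs:per_cpu__cpu_number */
}

static inline int example_get_cpu_number(void)
{
	return percpu_read(cpu_number);	/* movl %gs:per_cpu__cpu_number, ... */
}

This is exactly the pattern raw_smp_processor_id() switches to later in this diff.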
#ifdef CONFIG_SMP
@ -195,9 +160,9 @@ do { \
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
(early_per_cpu_ptr(_name) ? \
early_per_cpu_ptr(_name)[_cpu] : \
per_cpu(_name, _cpu))
*(early_per_cpu_ptr(_name) ? \
&early_per_cpu_ptr(_name)[_cpu] : \
&per_cpu(_name, _cpu))
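
The reworked body above dereferences whichever address the ternary picks, so early_per_cpu() is now an lvalue and the early-boot and normal cases collapse into a single assignment. A sketch of the resulting call-site shape (generic_processor_info() later in this diff does exactly this):

	/* writes the early map before the per-cpu areas exist,
	 * per_cpu(x86_cpu_to_apicid, cpu) afterwards */
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;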
#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \


@ -242,64 +242,78 @@ static inline int pmd_large(pmd_t pte)
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v | set);
}
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v & ~clear);
}
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
return pte_clear_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
return pte_clear_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_RW);
return pte_clear_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkexec(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_NX);
return pte_clear_flags(pte, _PAGE_NX);
}
static inline pte_t pte_mkdirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_DIRTY);
return pte_set_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_ACCESSED);
return pte_set_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_mkwrite(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_RW);
return pte_set_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_PSE);
return pte_set_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_clrhuge(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_PSE);
return pte_clear_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_mkglobal(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_GLOBAL);
return pte_set_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_clrglobal(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
return pte_clear_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPECIAL);
return pte_set_flags(pte, _PAGE_SPECIAL);
}
extern pteval_t __supported_pte_mask;


@ -11,7 +11,6 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/pda.h>
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];


@ -378,6 +378,22 @@ union thread_xstate {
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
union irq_stack_union {
char irq_stack[IRQ_STACK_SIZE];
/*
* GCC hardcodes the stack canary as %gs:40. Since the
* irq_stack is the object at %gs:0, we reserve the bottom
* 48 bytes of the irq stack for the canary.
*/
struct {
char gs_base[40];
unsigned long stack_canary;
};
};
DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
#endif
extern void print_cpu_info(struct cpuinfo_x86 *);
@ -752,9 +768,9 @@ extern int sysenter_setup(void);
extern struct desc_ptr early_gdt_descr;
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void init_gdt(int cpu);
static inline unsigned long get_debugctlmsr(void)
{


@ -100,7 +100,6 @@ extern unsigned long init_pg_tables_start;
extern unsigned long init_pg_tables_end;
#else
void __init x86_64_init_pda(void);
void __init x86_64_start_kernel(char *real_mode);
void __init x86_64_start_reservations(char *real_mode_data);


@ -15,34 +15,8 @@
# include <asm/io_apic.h>
# endif
#endif
#include <asm/pda.h>
#include <asm/thread_info.h>
#ifdef CONFIG_X86_64
extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;
#else /* CONFIG_X86_32 */
extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;
#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
#endif /* CONFIG_X86_32 */
extern void (*mtrr_hook)(void);
extern void zap_low_mappings(void);
extern int __cpuinit get_local_pda(int cpu);
#include <asm/cpumask.h>
extern int smp_num_siblings;
extern unsigned int num_processors;
@ -50,9 +24,7 @@ extern unsigned int num_processors;
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(int, cpu_number);
#endif
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@ -167,8 +139,6 @@ void play_dead_common(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
extern void prefill_possible_map(void);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
{
return cpumask_weight(cpu_callout_mask);
}
#else
static inline void prefill_possible_map(void)
{
}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus __cpuinitdata;
@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
#define raw_smp_processor_id() (percpu_read(cpu_number))
extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id() read_pda(cpunumber)
#define raw_smp_processor_id() (percpu_read(cpu_number))
#define stack_smp_processor_id() \
({ \
@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
})
#define safe_smp_processor_id() smp_processor_id()
#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
#define stack_smp_processor_id() 0
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@ -251,11 +213,5 @@ static inline int hard_smp_processor_id(void)
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
extern unsigned char boot_cpu_id;
#else
#define boot_cpu_id 0
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */


@ -0,0 +1,38 @@
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
#include <asm/tsc.h>
#include <asm/processor.h>
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
u64 canary;
u64 tsc;
/*
* Build time only check to make sure the stack_canary is at
* offset 40 in the pda; this is a gcc ABI requirement
*/
BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
/*
* We both use the random pool and the current TSC as a source
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
*/
get_random_bytes(&canary, sizeof(canary));
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
current->stack_canary = canary;
percpu_write(irq_stack_union.stack_canary, canary);
}
#endif


@ -86,27 +86,44 @@ do { \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
"r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
"movq %P[task_canary](%%rsi),%%r8\n\t" \
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam \
, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
"movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
"call __switch_to\n\t" \
".globl thread_return\n" \
"thread_return:\n\t" \
"movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
"jc ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
__switch_canary_oparam \
: [next] "S" (next), [prev] "D" (prev), \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[tif_fork] "i" (TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
[pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
[current_task] "m" (per_cpu_var(current_task)) \
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif


@ -194,25 +194,21 @@ static inline struct thread_info *current_thread_info(void)
#else /* X86_32 */
#include <asm/pda.h>
#include <asm/percpu.h>
#define KERNEL_STACK_OFFSET (5*8)
/*
* macros/functions for gaining access to the thread information structure
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned long, kernel_stack);
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
return ti;
}
/* do not use in interrupt context */
static inline struct thread_info *stack_thread_info(void)
{
struct thread_info *ti;
asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
ti = (void *)(percpu_read(kernel_stack) +
KERNEL_STACK_OFFSET - THREAD_SIZE);
return ti;
}
@ -220,8 +216,8 @@ static inline struct thread_info *stack_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
movq %gs:pda_kernelstack,reg ; \
subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
movq PER_CPU_VAR(kernel_stack),reg ; \
subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
#endif


@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}
static inline void native_flush_tlb_others(const cpumask_t *cpumask,
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va)
{
@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
flush_tlb_mm(vma->vm_mm);
}
void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
unsigned long va);
void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm, unsigned long va);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
#ifdef CONFIG_X86_32
struct tlb_state {
struct mm_struct *active_mm;
int state;
char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
void reset_lazy_tlbstate(void);
#else
static inline void reset_lazy_tlbstate(void)
{
percpu_write(cpu_tlbstate.state, 0);
percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
#endif
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
#endif
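
A hedged sketch of the call-site change the two definitions above imply (names illustrative): the old macro took the address of a cpumask_t itself, while the new one passes a struct cpumask pointer straight through, so callers now hand in the pointer they already hold:

/*
 * old:  cpumask_t m;                      flush_tlb_others(m, mm, va);
 *       (the macro expanded to &m)
 * new:  const struct cpumask *m;          flush_tlb_others(m, mm, va);
 *       (the pointer reaches native_flush_tlb_others() unchanged)
 */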
static inline void flush_tlb_kernel_range(unsigned long start,
@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_all();
}
extern void zap_low_mappings(void);
#endif /* _ASM_X86_TLBFLUSH_H */


@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
static inline void setup_node_to_cpumask_map(void) { }
#else /* CONFIG_X86_64 */
/* Mappings between node number and cpus on that node. */
@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
/* Returns the number of the current Node. */
#define numa_node_id() read_pda(nodenumber)
DECLARE_PER_CPU(int, node_number);
#define numa_node_id() percpu_read(node_number)
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
if (early_per_cpu_ptr(x86_cpu_to_node_map))
return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
return per_cpu(x86_cpu_to_node_map, cpu);
return early_per_cpu(x86_cpu_to_node_map, cpu);
}
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
extern void setup_node_to_cpumask_map(void);
/*
* Replace default node_to_cpumask_ptr with optimized version
* Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@ -192,9 +194,20 @@ extern int __node_distance(int, int);
#else /* !CONFIG_NUMA */
#define numa_node_id() 0
#define cpu_to_node(cpu) 0
#define early_cpu_to_node(cpu) 0
static inline int numa_node_id(void)
{
return 0;
}
static inline int cpu_to_node(int cpu)
{
return 0;
}
static inline int early_cpu_to_node(int cpu)
{
return 0;
}
static inline const cpumask_t *cpumask_of_node(int node)
{
@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
return first_cpu(cpu_online_map);
}
static inline void setup_node_to_cpumask_map(void) { }
/*
* Replace default node_to_cpumask_ptr with optimized version
* Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"


@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000


@ -0,0 +1,33 @@
#ifndef _ASM_X86_UV_UV_H
#define _ASM_X86_UV_UV_H
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#ifdef CONFIG_X86_UV
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va,
unsigned int cpu);
#else /* X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline int is_uv_system(void) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{ return 1; }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
unsigned long va, unsigned int cpu)
{ return cpumask; }
#endif /* X86_UV */
#endif /* _ASM_X86_UV_UV_H */


@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
#define cpubit_isset(cpu, bau_local_cpumask) \
test_bit((cpu), (bau_local_cpumask).bits)
extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);


@ -23,11 +23,12 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
CFLAGS_hpet.o := $(nostackp)
CFLAGS_tsc.o := $(nostackp)
CFLAGS_paravirt.o := $(nostackp)
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
obj-y += setup.o i8259.o irqinit_$(BITS).o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
@ -57,9 +58,9 @@ obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o
obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
obj-$(CONFIG_X86_32_SMP) += smpcommon.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
@ -114,10 +115,11 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
obj-y += bios_uv.o uv_irq.o uv_sysfs.o
obj-y += genapic_64.o genapic_flat_64.o
obj-y += genx2apic_cluster.o
obj-y += genx2apic_phys.o
obj-$(CONFIG_X86_UV) += genx2apic_uv_x.o tlb_uv.o
obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o


@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
DECLARE_BITMAP(used, 256);
bitmap_zero(used, 256);
for (i = 0; i < nr_ioapics; i++) {
struct mp_config_ioapic *ia = &mp_ioapics[i];
__set_bit(ia->mp_apicid, used);
struct mpc_ioapic *ia = &mp_ioapics[i];
__set_bit(ia->apicid, used);
}
if (!test_bit(id, used))
return id;
@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
idx = nr_ioapics;
mp_ioapics[idx].mp_type = MP_IOAPIC;
mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
mp_ioapics[idx].mp_apicaddr = address;
mp_ioapics[idx].type = MP_IOAPIC;
mp_ioapics[idx].flags = MPC_APIC_USABLE;
mp_ioapics[idx].apicaddr = address;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
mp_ioapics[idx].apicid = uniq_ioapic_id(id);
#ifdef CONFIG_X86_32
mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
mp_ioapics[idx].apicver = io_apic_get_version(idx);
#else
mp_ioapics[idx].mp_apicver = 0;
mp_ioapics[idx].apicver = 0;
#endif
/*
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
mp_ioapic_routing[idx].gsi_base = gsi_base;
mp_ioapic_routing[idx].gsi_end = gsi_base +
io_apic_get_redir_entries(idx);
printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
"GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
nr_ioapics++;
}
static void assign_to_mp_irq(struct mp_config_intsrc *m,
struct mp_config_intsrc *mp_irq)
static void assign_to_mp_irq(struct mpc_intsrc *m,
struct mpc_intsrc *mp_irq)
{
memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
}
static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
struct mp_config_intsrc *m)
static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m)
{
return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
}
static void save_mp_irq(struct mp_config_intsrc *m)
static void save_mp_irq(struct mpc_intsrc *m)
{
int i;
@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
int ioapic;
int pin;
struct mp_config_intsrc mp_irq;
struct mpc_intsrc mp_irq;
/*
* Convert 'gsi' to 'ioapic.pin'.
@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
if ((bus_irq == 0) && (trigger == 3))
trigger = 1;
mp_irq.mp_type = MP_INTSRC;
mp_irq.mp_irqtype = mp_INT;
mp_irq.mp_irqflag = (trigger << 2) | polarity;
mp_irq.mp_srcbus = MP_ISA_BUS;
mp_irq.mp_srcbusirq = bus_irq; /* IRQ */
mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
mp_irq.mp_dstirq = pin; /* INTIN# */
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = (trigger << 2) | polarity;
mp_irq.srcbus = MP_ISA_BUS;
mp_irq.srcbusirq = bus_irq; /* IRQ */
mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
}
@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
int i;
int ioapic;
unsigned int dstapic;
struct mp_config_intsrc mp_irq;
struct mpc_intsrc mp_irq;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
/*
@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
ioapic = mp_find_ioapic(0);
if (ioapic < 0)
return;
dstapic = mp_ioapics[ioapic].mp_apicid;
dstapic = mp_ioapics[ioapic].apicid;
/*
* Use the default configuration for the IRQs 0-15. Unless
@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
int idx;
for (idx = 0; idx < mp_irq_entries; idx++) {
struct mp_config_intsrc *irq = mp_irqs + idx;
struct mpc_intsrc *irq = mp_irqs + idx;
/* Do we already have a mapping for this ISA IRQ? */
if (irq->mp_srcbus == MP_ISA_BUS
&& irq->mp_srcbusirq == i)
if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
break;
/* Do we already have a mapping for this IOAPIC pin */
if (irq->mp_dstapic == dstapic &&
irq->mp_dstirq == i)
if (irq->dstapic == dstapic && irq->dstirq == i)
break;
}
@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
continue; /* IRQ already used */
}
mp_irq.mp_type = MP_INTSRC;
mp_irq.mp_irqflag = 0; /* Conforming */
mp_irq.mp_srcbus = MP_ISA_BUS;
mp_irq.mp_dstapic = dstapic;
mp_irq.mp_irqtype = mp_INT;
mp_irq.mp_srcbusirq = i; /* Identity mapped */
mp_irq.mp_dstirq = i;
mp_irq.type = MP_INTSRC;
mp_irq.irqflag = 0; /* Conforming */
mp_irq.srcbus = MP_ISA_BUS;
mp_irq.dstapic = dstapic;
mp_irq.irqtype = mp_INT;
mp_irq.srcbusirq = i; /* Identity mapped */
mp_irq.dstirq = i;
save_mp_irq(&mp_irq);
}
@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
u32 gsi, int triggering, int polarity)
{
#ifdef CONFIG_X86_MPPARSE
struct mp_config_intsrc mp_irq;
struct mpc_intsrc mp_irq;
int ioapic;
if (!acpi_ioapic)
return 0;
/* print the entry should happen on mptable identically */
mp_irq.mp_type = MP_INTSRC;
mp_irq.mp_irqtype = mp_INT;
mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
mp_irq.mp_srcbus = number;
mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
mp_irq.srcbus = number;
mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
ioapic = mp_find_ioapic(gsi);
mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
save_mp_irq(&mp_irq);
#endif


@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
stack_start.sp = temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());
#endif
initial_code = (unsigned long)wakeup_long64;
saved_magic = 0x123456789abcdef0;


@ -60,6 +60,24 @@
# error SPURIOUS_APIC_VECTOR definition error
#endif
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
/*
* Map cpu index to physical APIC ID
*/
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#ifdef CONFIG_X86_32
/*
* Knob to control our willingness to enable the local APIC.
@ -1130,6 +1148,13 @@ void __cpuinit setup_local_APIC(void)
unsigned int value;
int i, j;
if (disable_apic) {
#ifdef CONFIG_X86_IO_APIC
disable_ioapic_setup();
#endif
return;
}
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (lapic_is_integrated() && esr_disable) {
@ -1570,11 +1595,11 @@ int apic_version[MAX_APICS];
int __init APIC_init_uniprocessor(void)
{
#ifdef CONFIG_X86_64
if (disable_apic) {
pr_info("Apic disabled\n");
return -1;
}
#ifdef CONFIG_X86_64
if (!cpu_has_apic) {
disable_apic = 1;
pr_info("Apic disabled by BIOS\n");
@ -1877,17 +1902,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
#endif
#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
/* are we being called early in kernel startup? */
if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
cpu_to_apicid[cpu] = apicid;
bios_cpu_apicid[cpu] = apicid;
} else {
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
}
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
set_cpu_possible(cpu, true);


@ -11,7 +11,6 @@
#include <linux/hardirq.h>
#include <linux/suspend.h>
#include <linux/kbuild.h>
#include <asm/pda.h>
#include <asm/processor.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
@ -48,16 +47,6 @@ int main(void)
#endif
BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
ENTRY(kernelstack);
ENTRY(oldrsp);
ENTRY(pcurrent);
ENTRY(irqcount);
ENTRY(cpunumber);
ENTRY(irqstackptr);
ENTRY(data_offset);
BLANK();
#undef ENTRY
#ifdef CONFIG_PARAVIRT
BLANK();
OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);


@ -21,14 +21,16 @@
#include <asm/asm.h>
#include <asm/numa.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#include <asm/uv/uv.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
@ -50,6 +52,15 @@ cpumask_var_t cpu_initialized_mask;
/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;
/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
alloc_bootmem_cpumask_var(&cpu_initialized_mask);
alloc_bootmem_cpumask_var(&cpu_callin_mask);
alloc_bootmem_cpumask_var(&cpu_callout_mask);
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
#else /* CONFIG_X86_32 */
cpumask_t cpu_callin_map;
@ -62,23 +73,23 @@ cpumask_t cpu_sibling_setup_map;
static struct cpu_dev *this_cpu __cpuinitdata;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
/* We need valid kernel segments for data and code in long mode too
* IRET will check the segment types kkeil 2000/10/28
* Also sysret mandates a special GDT layout
*/
/* The TLS descriptors are currently at a different place compared to i386.
Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
/*
* We need valid kernel segments for data and code in long mode too
* IRET will check the segment types kkeil 2000/10/28
* Also sysret mandates a special GDT layout
*
* The TLS descriptors are currently at a different place compared to i386.
* Hopefully nobody expects them at a fixed place (Wine?)
*/
[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@ -110,9 +121,9 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
#ifdef CONFIG_X86_32
@ -242,18 +253,28 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
loadsegment(fs, __KERNEL_PERCPU);
#else
loadsegment(gs, 0);
wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
}
/* Current gdt points %fs at the "master" per-cpu area: after this,
* it's on the real one. */
void switch_to_new_gdt(void)
void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
gdt_descr.address = (long)get_cpu_gdt_table(cpu);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
/* Reload the per-cpu base */
load_percpu_segment(cpu);
}
static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@ -877,54 +898,26 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */
#else
DEFINE_PER_CPU(char *, irq_stack_ptr) =
per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
#endif
void __cpuinit pda_init(int cpu)
{
struct x8664_pda *pda = cpu_pda(cpu);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);
/* Setup up data that may be needed in __get_free_pages early */
loadsegment(fs, 0);
loadsegment(gs, 0);
/* Memory clobbers used to order PDA accessed */
mb();
wrmsrl(MSR_GS_BASE, pda);
mb();
DEFINE_PER_CPU(unsigned int, irq_count) = -1;
pda->cpunumber = cpu;
pda->irqcount = -1;
pda->kernelstack = (unsigned long)stack_thread_info() -
PDA_STACKOFFSET + THREAD_SIZE;
pda->active_mm = &init_mm;
pda->mmu_state = 0;
if (cpu == 0) {
/* others are initialized in smpboot.c */
pda->pcurrent = &init_task;
pda->irqstackptr = boot_cpu_stack;
pda->irqstackptr += IRQSTACKSIZE - 64;
} else {
if (!pda->irqstackptr) {
pda->irqstackptr = (char *)
__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
if (!pda->irqstackptr)
panic("cannot allocate irqstack for cpu %d",
cpu);
pda->irqstackptr += IRQSTACKSIZE - 64;
}
if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
pda->nodenumber = cpu_to_node(cpu);
}
}
static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
DEBUG_STKSZ] __page_aligned_bss;
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
__aligned(PAGE_SIZE);
extern asmlinkage void ignore_sysret(void);
@ -982,15 +975,14 @@ void __cpuinit cpu_init(void)
struct tss_struct *t = &per_cpu(init_tss, cpu);
struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
unsigned long v;
char *estacks = NULL;
struct task_struct *me;
int i;
/* CPU 0 is initialised in head64.c */
if (cpu != 0)
pda_init(cpu);
else
estacks = boot_exception_stacks;
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
cpu_to_node(cpu) != NUMA_NO_NODE)
percpu_write(node_number, cpu_to_node(cpu));
#endif
me = current;
@ -1006,7 +998,9 @@ void __cpuinit cpu_init(void)
* and set up the GDT descriptor:
*/
switch_to_new_gdt();
switch_to_new_gdt(cpu);
loadsegment(fs, 0);
load_idt((const struct desc_ptr *)&idt_descr);
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@ -1024,18 +1018,13 @@ void __cpuinit cpu_init(void)
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
static const unsigned int order[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
static const unsigned int sizes[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
[DEBUG_STACK - 1] = DEBUG_STKSZ
};
char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
if (cpu) {
estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
if (!estacks)
panic("Cannot allocate exception "
"stack %ld %d\n", v, cpu);
}
estacks += PAGE_SIZE << order[v];
estacks += sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
@ -1114,7 +1103,7 @@ void __cpuinit cpu_init(void)
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
load_idt(&idt_descr);
switch_to_new_gdt();
switch_to_new_gdt(cpu);
/*
* Set up and load the per-CPU TSS and LDT


@ -147,7 +147,16 @@ struct _cpuid4_info {
union _cpuid4_leaf_ecx ecx;
unsigned long size;
unsigned long can_disable;
cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
unsigned long can_disable;
};
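
A short sketch of the pattern the hunks below adopt for the bitmap-backed mask: the field is a fixed-size unsigned-long array regardless of CONFIG_CPUMASK_OFFSTACK, and every cpumask operation goes through to_cpumask(). Function names here are illustrative:

static void example_mark_shared(struct _cpuid4_info *leaf, unsigned int cpu)
{
	cpumask_set_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}

static bool example_is_shared(struct _cpuid4_info *leaf, unsigned int cpu)
{
	return cpumask_test_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}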
#ifdef CONFIG_PCI
@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
if (index < 3)
return;
@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
__cpuinit cpuid4_cache_lookup_regs(int index,
struct _cpuid4_info_regs *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
return 0;
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
struct _cpuid4_info_regs *leaf_regs =
(struct _cpuid4_info_regs *)this_leaf;
return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static int __cpuinit find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
* parameters cpuid leaf to find the cache details
*/
for (i = 0; i < num_cache_leaves; i++) {
struct _cpuid4_info this_leaf;
struct _cpuid4_info_regs this_leaf;
int retval;
retval = cpuid4_cache_lookup(i, &this_leaf);
retval = cpuid4_cache_lookup_regs(i, &this_leaf);
if (retval >= 0) {
switch(this_leaf.eax.split.level) {
case 1:
@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
if (num_threads_sharing == 1)
cpu_set(cpu, this_leaf->shared_cpu_map);
cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
else {
index_msb = get_count_order(num_threads_sharing);
for_each_online_cpu(i) {
if (cpu_data(i).apicid >> index_msb ==
c->apicid >> index_msb) {
cpu_set(i, this_leaf->shared_cpu_map);
cpumask_set_cpu(i,
to_cpumask(this_leaf->shared_cpu_map));
if (i != cpu && per_cpu(cpuid4_info, i)) {
sibling_leaf = CPUID4_INFO_IDX(i, index);
cpu_set(cpu, sibling_leaf->shared_cpu_map);
sibling_leaf =
CPUID4_INFO_IDX(i, index);
cpumask_set_cpu(cpu, to_cpumask(
sibling_leaf->shared_cpu_map));
}
}
}
@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
int sibling;
this_leaf = CPUID4_INFO_IDX(cpu, index);
for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
cpumask_clear_cpu(cpu,
to_cpumask(sibling_leaf->shared_cpu_map));
}
}
#else
@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
int n = 0;
if (len > 1) {
cpumask_t *mask = &this_leaf->shared_cpu_map;
const struct cpumask *mask;
mask = to_cpumask(this_leaf->shared_cpu_map);
n = type?
cpulist_scnprintf(buf, len-2, mask) :
cpumask_scnprintf(buf, len-2, mask);
@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
ssize_t ret = 0;
int i;
@ -733,7 +757,8 @@ static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
size_t count)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
unsigned int ret, index, val;
@ -878,7 +903,7 @@ err_out:
return -ENOMEM;
}
static cpumask_t cache_dev_map = CPU_MASK_NONE;
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
cpu_set(cpu, cache_dev_map);
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
return 0;
@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
if (per_cpu(cpuid4_info, cpu) == NULL)
return;
if (!cpu_isset(cpu, cache_dev_map))
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
cpu_clear(cpu, cache_dev_map);
cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));


@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
struct threshold_bank {
struct kobject *kobj;
struct threshold_block *blocks;
cpumask_t cpus;
cpumask_var_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
i = first_cpu(per_cpu(cpu_core_map, cpu));
i = cpumask_first(&per_cpu(cpu_core_map, cpu));
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
b->cpus = per_cpu(cpu_core_map, cpu);
cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
}
b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
if (!b->kobj)
goto out_free;
#ifndef CONFIG_SMP
b->cpus = CPU_MASK_ALL;
cpumask_setall(b->cpus);
#else
b->cpus = per_cpu(cpu_core_map, cpu);
cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out_free;
for_each_cpu_mask_nr(i, b->cpus) {
for_each_cpu(i, b->cpus) {
if (i == cpu)
continue;
@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
out_free:
per_cpu(threshold_banks, cpu)[bank] = NULL;
free_cpumask_var(b->cpus);
kfree(b);
out:
return err;
@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#endif
/* remove all sibling symlinks before unregistering */
for_each_cpu_mask_nr(i, b->cpus) {
for_each_cpu(i, b->cpus) {
if (i == cpu)
continue;
@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
free_out:
kobject_del(b->kobj);
kobject_put(b->kobj);
free_cpumask_var(b->cpus);
kfree(b);
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
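
For reference, a hedged sketch of the cpumask_var_t lifecycle the two hunks above follow, with illustrative function names; the point is that the mask may be heap-backed under CONFIG_CPUMASK_OFFSTACK and so needs explicit allocation and freeing:

static int example_bank_alloc(struct threshold_bank *b, unsigned int cpu)
{
	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
	return 0;
}

static void example_bank_release(struct threshold_bank *b)
{
	free_cpumask_var(b->cpus);	/* no-op when the mask is embedded */
}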


@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>


@ -24,7 +24,7 @@
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>


@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
unsigned long *irq_stack_end =
(unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned used = 0;
struct thread_info *tinfo;
int graph = 0;
@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
stack = (unsigned long *) estack_end[-2];
continue;
}
if (irqstack_end) {
unsigned long *irqstack;
irqstack = irqstack_end -
(IRQSTACKSIZE - 64) / sizeof(*irqstack);
if (irq_stack_end) {
unsigned long *irq_stack;
irq_stack = irq_stack_end -
(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
if (stack >= irqstack && stack < irqstack_end) {
if (stack >= irq_stack && stack < irq_stack_end) {
if (ops->stack(data, "IRQ") < 0)
break;
bp = print_context_stack(tinfo, stack, bp,
ops, data, irqstack_end, &graph);
ops, data, irq_stack_end, &graph);
/*
* We link to the next stack (which would be
* the process stack normally) the last
* pointer (index -1 to end) in the IRQ stack:
*/
stack = (unsigned long *) (irqstack_end[-1]);
irqstack_end = NULL;
stack = (unsigned long *) (irq_stack_end[-1]);
irq_stack_end = NULL;
ops->stack(data, "EOI");
continue;
}
@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack;
int i;
const int cpu = smp_processor_id();
unsigned long *irqstack_end =
(unsigned long *) (cpu_pda(cpu)->irqstackptr);
unsigned long *irqstack =
(unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
unsigned long *irq_stack_end =
(unsigned long *)(per_cpu(irq_stack_ptr, cpu));
unsigned long *irq_stack =
(unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
/*
* debugging aid: "show_stack(NULL, NULL);" prints the
@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (stack >= irqstack && stack <= irqstack_end) {
if (stack == irqstack_end) {
stack = (unsigned long *) (irqstack_end[-1]);
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
printk(" <EOI> ");
}
} else {
@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
int i;
unsigned long sp;
const int cpu = smp_processor_id();
struct task_struct *cur = cpu_pda(cpu)->pcurrent;
struct task_struct *cur = current;
sp = regs->sp;
printk("CPU %d ", cpu);


@ -366,10 +366,12 @@ void __init efi_init(void)
SMBIOS_TABLE_GUID)) {
efi.smbios = config_tables[i].table;
printk(" SMBIOS=0x%lx ", config_tables[i].table);
#ifdef CONFIG_X86_UV
} else if (!efi_guidcmp(config_tables[i].guid,
UV_SYSTEM_TABLE_GUID)) {
efi.uv_systab = config_tables[i].table;
printk(" UVsystab=0x%lx ", config_tables[i].table);
#endif
} else if (!efi_guidcmp(config_tables[i].guid,
HCDP_TABLE_GUID)) {
efi.hcdp = config_tables[i].table;


@ -36,6 +36,7 @@
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
static pgd_t save_pgd __initdata;
static unsigned long efi_flags __initdata;


@ -672,7 +672,7 @@ common_interrupt:
ENDPROC(common_interrupt)
CFI_ENDPROC
#define BUILD_INTERRUPT(name, nr) \
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
pushl $~(nr); \
@ -680,11 +680,13 @@ ENTRY(name) \
SAVE_ALL; \
TRACE_IRQS_OFF \
movl %esp,%eax; \
call smp_##name; \
call fn; \
jmp ret_from_intr; \
CFI_ENDPROC; \
ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"


@ -52,6 +52,7 @@
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
/* %rsp:at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp offset=0
movq %gs:pda_oldrsp,\tmp
movq PER_CPU_VAR(old_rsp),\tmp
movq \tmp,RSP+\offset(%rsp)
movq $__USER_DS,SS+\offset(%rsp)
movq $__USER_CS,CS+\offset(%rsp)
@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
.macro RESTORE_TOP_OF_STACK tmp offset=0
movq RSP+\offset(%rsp),\tmp
movq \tmp,%gs:pda_oldrsp
movq \tmp,PER_CPU_VAR(old_rsp)
movq EFLAGS+\offset(%rsp),\tmp
movq \tmp,R11+\offset(%rsp)
.endm
@ -336,15 +337,15 @@ ENTRY(save_args)
je 1f
SWAPGS
/*
* irqcount is used to check if a CPU is already on an interrupt stack
* irq_count is used to check if a CPU is already on an interrupt stack
* or not. While this is essentially redundant with preempt_count it is
* a little cheaper to use a separate counter in the PDA (short of
* moving irq_enter into assembly, which would be too much work)
*/
1: incl %gs:pda_irqcount
1: incl PER_CPU_VAR(irq_count)
jne 2f
popq_cfi %rax /* move return address... */
mov %gs:pda_irqstackptr,%rsp
mov PER_CPU_VAR(irq_stack_ptr),%rsp
EMPTY_FRAME 0
pushq_cfi %rbp /* backlink for unwinder */
pushq_cfi %rax /* ... to the new stack */
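
The comment above describes the per-cpu irq_count trick; in rough C terms (helper names invented, and the initial value of -1 assumed as elsewhere in this series) the stack switch amounts to:

/* C model of the asm above, for illustration only. */
DEFINE_PER_CPU(unsigned int, irq_count) = -1;
DEFINE_PER_CPU(char *, irq_stack_ptr);

static void irq_stack_enter(void)               /* hypothetical helper */
{
        if (++__get_cpu_var(irq_count) == 0)            /* outermost interrupt */
                load_sp(__get_cpu_var(irq_stack_ptr)); /* made-up stack switch */
        /* nested interrupts keep running on the stack they arrived on */
}

The matching decrement is the decl PER_CPU_VAR(irq_count) in ret_from_intr further down.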
@ -468,7 +469,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
@ -479,8 +480,8 @@ ENTRY(system_call)
*/
ENTRY(system_call_after_swapgs)
movq %rsp,%gs:pda_oldrsp
movq %gs:pda_kernelstack,%rsp
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs off/on section - it's straight
* and short:
@ -523,7 +524,7 @@ sysret_check:
CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
/*CFI_REGISTER rflags,r11*/
movq %gs:pda_oldrsp, %rsp
movq PER_CPU_VAR(old_rsp), %rsp
USERGS_SYSRET64
CFI_RESTORE_STATE
@ -833,11 +834,11 @@ common_interrupt:
XCPT_FRAME
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
decl %gs:pda_irqcount
decl PER_CPU_VAR(irq_count)
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
@ -982,8 +983,10 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
#endif
#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
apic_timer_interrupt smp_apic_timer_interrupt
@ -1073,10 +1076,10 @@ ENTRY(\sym)
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
movq %gs:pda_data_offset, %rbp
subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
PER_CPU(init_tss, %rbp)
subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
call \do_sym
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
jmp paranoid_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
END(\sym)
@ -1138,7 +1141,7 @@ ENTRY(native_load_gs_index)
CFI_STARTPROC
pushf
CFI_ADJUST_CFA_OFFSET 8
DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS
gs_change:
movl %edi,%gs
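
The one-character change above is a real fix: OR-ing with a complement sets nearly every bit, while AND-ing with it clears only the named one. Keeping to the low byte for illustration, say CLBR_RDI were 0x01 and CLBR_ANY 0xff (the real CLBR_* values live in asm/paravirt.h and may differ): the old expression CLBR_ANY | ~(CLBR_RDI) is 0xff | 0xfe = 0xff, so %rdi is still treated as clobbered; the intended CLBR_ANY & ~CLBR_RDI is 0xff & 0xfe = 0xfe, which actually preserves %rdi across the pvop call.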
@ -1260,14 +1263,14 @@ ENTRY(call_softirq)
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl %gs:pda_irqcount
cmove %gs:pda_irqstackptr,%rsp
incl PER_CPU_VAR(irq_count)
cmove PER_CPU_VAR(irq_stack_ptr),%rsp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl %gs:pda_irqcount
decl PER_CPU_VAR(irq_count)
ret
CFI_ENDPROC
END(call_softirq)
@ -1297,15 +1300,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
DEFAULT_FRAME
11: incl %gs:pda_irqcount
11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
cmovzq %gs:pda_irqstackptr,%rsp
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall
popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl %gs:pda_irqcount
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
END(do_hypervisor_callback)
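
Throughout these entry_64.S hunks, the pda_oldrsp, pda_kernelstack, pda_irqcount and pda_irqstackptr references become ordinary per-cpu variables addressed through PER_CPU_VAR(). As a rough sketch (the authoritative definition is in asm/percpu.h and uses %fs rather than %gs on 32-bit), the macro boils down to:

/* Approximate shape only; not copied from the patch. */
#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)        %gs:per_cpu__##var
#else
#define PER_CPU_VAR(var)        per_cpu__##var
#endif

so incl PER_CPU_VAR(irq_count) assembles to the same kind of %gs-relative access that incl %gs:pda_irqcount used, just against a regular per-cpu symbol instead of a pda field.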


@ -32,7 +32,9 @@ extern struct genapic apic_x2apic_cluster;
struct genapic __read_mostly *genapic = &apic_flat;
static struct genapic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_UV
&apic_x2apic_uv_x,
#endif
&apic_x2apic_phys,
&apic_x2apic_cluster,
&apic_physflat,


@ -25,6 +25,7 @@
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/pgtable.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>


@ -26,27 +26,6 @@
#include <asm/bios_ebda.h>
#include <asm/trampoline.h>
/* boot cpu pda */
static struct x8664_pda _boot_cpu_pda;
#ifdef CONFIG_SMP
/*
* We install an empty cpu_pda pointer table to indicate to early users
* (numa_set_node) that the cpu_pda pointer table for cpus other than
* the boot cpu is not yet setup.
*/
static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
#else
static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
#endif
void __init x86_64_init_pda(void)
{
_cpu_pda = __cpu_pda;
cpu_pda(0) = &_boot_cpu_pda;
pda_init(0);
}
static void __init zap_identity_mappings(void)
{
pgd_t *pgd = pgd_offset_k(0UL);
@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
if (console_loglevel == 10)
early_printk("Kernel alive\n");
x86_64_init_pda();
x86_64_start_reservations(real_mode_data);
}


@ -429,12 +429,14 @@ is386: movl $2,%ecx # set MP
ljmp $(__KERNEL_CS),$1f
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
movl %eax,%ss # after changing gdt.
movl %eax,%fs # gets reset once there's real percpu
movl $(__USER_DS),%eax # DS/ES contains default USER segment
movl %eax,%ds
movl %eax,%es
movl $(__KERNEL_PERCPU), %eax
movl %eax,%fs # set this cpu's percpu
xorl %eax,%eax # Clear GS and LDT
movl %eax,%gs
lldt %ax
@ -446,8 +448,6 @@ is386: movl $2,%ecx # set MP
movb $1, ready
cmpb $0,%cl # the first CPU calls start_kernel
je 1f
movl $(__KERNEL_PERCPU), %eax
movl %eax,%fs # set this cpu's percpu
movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */


@ -19,6 +19,7 @@
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@ -204,6 +205,19 @@ ENTRY(secondary_startup_64)
pushq $0
popfq
#ifdef CONFIG_SMP
/*
* Fix up static pointers that need __per_cpu_load added. The assembler
* is unable to do this directly. This is only needed for the boot cpu.
* These values are set up with the correct base addresses by C code for
* secondary cpus.
*/
movq initial_gs(%rip), %rax
cmpl $0, per_cpu__cpu_number(%rax)
jne 1f
addq %rax, early_gdt_descr_base(%rip)
1:
#endif
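
This fixup exists because the series links per-cpu symbols zero-based: on the boot CPU they still sit in the initial per-cpu image at __per_cpu_load until setup_per_cpu_areas() hands out real bases. A sketch of the address arithmetic, with made-up numbers:

/*
 * Illustration only, addresses invented:
 *   per_cpu__gdt_page   = 0x4000              (zero-based link address)
 *   __per_cpu_load      = 0xffffffff80a00000  (initial per-cpu image)
 *   boot CPU's gdt_page = __per_cpu_load + 0x4000
 * Secondary CPUs get their descriptor bases set up by C code, so only
 * the boot CPU needs early_gdt_descr_base adjusted this early.
 */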
/*
* We must switch to a new descriptor in kernel space for the GDT
* because soon the kernel won't have access anymore to the userspace
@ -226,12 +240,15 @@ ENTRY(secondary_startup_64)
movl %eax,%fs
movl %eax,%gs
/*
* Setup up a dummy PDA. this is just for some early bootup code
* that does in_interrupt()
*/
/* Set up %gs.
*
* The base of %gs always points to the bottom of the irqstack
* union. If the stack protector canary is enabled, it is
* located at %gs:40. Note that, on SMP, the boot cpu uses
* init data section till per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
movq $empty_zero_page,%rax
movq initial_gs(%rip),%rax
movq %rax,%rdx
shrq $32,%rdx
wrmsr
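
irq_stack_union is introduced elsewhere in this series; judging from the %gs:40 remark, its layout is roughly the following, overlaying the bottom of the per-cpu IRQ stack with the stack-protector canary slot:

/* Approximate layout; see the series' asm/processor.h for the real union. */
union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];
        struct {
                char gs_base[40];               /* %gs base points here */
                unsigned long stack_canary;     /* gcc's stack protector reads %gs:40 */
        };
};

Pointing MSR_GS_BASE at this union therefore gives both the IRQ stack and the canary a single per-cpu anchor.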
@ -257,6 +274,12 @@ ENTRY(secondary_startup_64)
.align 8
ENTRY(initial_code)
.quad x86_64_start_kernel
ENTRY(initial_gs)
#ifdef CONFIG_SMP
.quad __per_cpu_load
#else
.quad PER_CPU_VAR(irq_stack_union)
#endif
__FINITDATA
ENTRY(stack_start)
@ -401,7 +424,8 @@ NEXT_PAGE(level2_spare_pgt)
.globl early_gdt_descr
early_gdt_descr:
.word GDT_ENTRIES*8-1
.quad per_cpu__gdt_page
early_gdt_descr_base:
.quad per_cpu__gdt_page
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */


@ -46,6 +46,7 @@
#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
@ -82,11 +83,11 @@ static DEFINE_SPINLOCK(vector_lock);
int nr_ioapic_registers[MAX_IO_APICS];
/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;
/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* # of MP IRQ source entries */
int mp_irq_entries;
@ -356,7 +357,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
if (!cfg->move_in_progress) {
/* it means that domain is not changed */
if (!cpumask_intersects(&desc->affinity, mask))
if (!cpumask_intersects(desc->affinity, mask))
cfg->move_desc_pending = 1;
}
}
@ -386,7 +387,7 @@ struct io_apic {
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@ -579,9 +580,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_and(&desc->affinity, cfg->domain, mask);
cpumask_and(desc->affinity, cfg->domain, mask);
set_extra_move_desc(desc, mask);
return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}
static void
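
The &desc->affinity to desc->affinity switches in this file (and the later cpumask_copy()/cpumask_clear() calls on desc->pending_mask) follow from irq_desc's masks becoming cpumask_var_t. A sketch of just those fields, not the full structure:

/* Sketch; struct irq_desc has many more members than shown. */
struct irq_desc {
        /* ... */
        cpumask_var_t   affinity;       /* was: cpumask_t affinity; */
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t   pending_mask;   /* was: cpumask_t pending_mask; */
#endif
        /* ... */
};

With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is struct cpumask *; without it, it is a one-element struct cpumask array, so desc->affinity works as a pointer either way and the explicit & is no longer wanted.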
@ -944,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type)
int i;
for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].mp_irqtype == type &&
(mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
mp_irqs[i].mp_dstirq == pin)
if (mp_irqs[i].irqtype == type &&
(mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
mp_irqs[i].dstapic == MP_APIC_ALL) &&
mp_irqs[i].dstirq == pin)
return i;
return -1;
@ -961,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type)
int i;
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mp_srcbus;
int lbus = mp_irqs[i].srcbus;
if (test_bit(lbus, mp_bus_not_pci) &&
(mp_irqs[i].mp_irqtype == type) &&
(mp_irqs[i].mp_srcbusirq == irq))
(mp_irqs[i].irqtype == type) &&
(mp_irqs[i].srcbusirq == irq))
return mp_irqs[i].mp_dstirq;
return mp_irqs[i].dstirq;
}
return -1;
}
@ -977,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type)
int i;
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mp_srcbus;
int lbus = mp_irqs[i].srcbus;
if (test_bit(lbus, mp_bus_not_pci) &&
(mp_irqs[i].mp_irqtype == type) &&
(mp_irqs[i].mp_srcbusirq == irq))
(mp_irqs[i].irqtype == type) &&
(mp_irqs[i].srcbusirq == irq))
break;
}
if (i < mp_irq_entries) {
int apic;
for(apic = 0; apic < nr_ioapics; apic++) {
if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
return apic;
}
}
@ -1012,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
return -1;
}
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mp_srcbus;
int lbus = mp_irqs[i].srcbus;
for (apic = 0; apic < nr_ioapics; apic++)
if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
mp_irqs[i].mp_dstapic == MP_APIC_ALL)
if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
mp_irqs[i].dstapic == MP_APIC_ALL)
break;
if (!test_bit(lbus, mp_bus_not_pci) &&
!mp_irqs[i].mp_irqtype &&
!mp_irqs[i].irqtype &&
(bus == lbus) &&
(slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
(slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
if (!(apic || IO_APIC_IRQ(irq)))
continue;
if (pin == (mp_irqs[i].mp_srcbusirq & 3))
if (pin == (mp_irqs[i].srcbusirq & 3))
return irq;
/*
* Use the first all-but-pin matching entry as a
@ -1071,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq)
* EISA conforming in the MP table, that means its trigger type must
* be read in from the ELCR */
#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx) default_ISA_polarity(idx)
/* PCI interrupts are always polarity one level triggered,
@ -1088,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq)
static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mp_srcbus;
int bus = mp_irqs[idx].srcbus;
int polarity;
/*
* Determine IRQ line polarity (high active or low active):
*/
switch (mp_irqs[idx].mp_irqflag & 3)
switch (mp_irqs[idx].irqflag & 3)
{
case 0: /* conforms, ie. bus-type dependent polarity */
if (test_bit(bus, mp_bus_not_pci))
@ -1130,13 +1131,13 @@ static int MPBIOS_polarity(int idx)
static int MPBIOS_trigger(int idx)
{
int bus = mp_irqs[idx].mp_srcbus;
int bus = mp_irqs[idx].srcbus;
int trigger;
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
switch ((mp_irqs[idx].irqflag>>2) & 3)
{
case 0: /* conforms, ie. bus-type dependent */
if (test_bit(bus, mp_bus_not_pci))
@ -1214,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
int irq, i;
int bus = mp_irqs[idx].mp_srcbus;
int bus = mp_irqs[idx].srcbus;
/*
* Debugging check, we are in big trouble if this message pops up!
*/
if (mp_irqs[idx].mp_dstirq != pin)
if (mp_irqs[idx].dstirq != pin)
printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].mp_srcbusirq;
irq = mp_irqs[idx].srcbusirq;
} else {
/*
* PCI IRQs are mapped in order
@ -1566,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n",
apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
apic, mp_ioapics[apic].apicid, pin, cfg->vector,
irq, trigger, polarity);
if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
dest, trigger, polarity, cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic].mp_apicid, pin);
mp_ioapics[apic].apicid, pin);
__clear_irq_vector(irq, cfg);
return;
}
@ -1604,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void)
notcon = 1;
apic_printk(APIC_VERBOSE,
KERN_DEBUG " %d-%d",
mp_ioapics[apic].mp_apicid,
pin);
mp_ioapics[apic].apicid, pin);
} else
apic_printk(APIC_VERBOSE, " %d-%d",
mp_ioapics[apic].mp_apicid,
pin);
mp_ioapics[apic].apicid, pin);
continue;
}
if (notcon) {
@ -1699,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void)
printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
for (i = 0; i < nr_ioapics; i++)
printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
mp_ioapics[i].apicid, nr_ioapic_registers[i]);
/*
* We are a bit conservative about what we expect. We have to
@ -1719,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void)
spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@ -2121,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void)
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
old_id = mp_ioapics[apic].mp_apicid;
old_id = mp_ioapics[apic].apicid;
if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
apic, mp_ioapics[apic].mp_apicid);
apic, mp_ioapics[apic].apicid);
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
reg_00.bits.ID);
mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
mp_ioapics[apic].apicid = reg_00.bits.ID;
}
/*
@ -2137,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
if (check_apicid_used(phys_id_present_map,
mp_ioapics[apic].mp_apicid)) {
mp_ioapics[apic].apicid)) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
apic, mp_ioapics[apic].mp_apicid);
apic, mp_ioapics[apic].apicid);
for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map))
break;
@ -2148,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
physid_set(i, phys_id_present_map);
mp_ioapics[apic].mp_apicid = i;
mp_ioapics[apic].apicid = i;
} else {
physid_mask_t tmp;
tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
apic_printk(APIC_VERBOSE, "Setting %d in the "
"phys_id_present_map\n",
mp_ioapics[apic].mp_apicid);
mp_ioapics[apic].apicid);
physids_or(phys_id_present_map, phys_id_present_map, tmp);
}
@ -2163,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
* We need to adjust the IRQ routing table
* if the ID changed.
*/
if (old_id != mp_ioapics[apic].mp_apicid)
if (old_id != mp_ioapics[apic].apicid)
for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].mp_dstapic == old_id)
mp_irqs[i].mp_dstapic
= mp_ioapics[apic].mp_apicid;
if (mp_irqs[i].dstapic == old_id)
mp_irqs[i].dstapic
= mp_ioapics[apic].apicid;
/*
* Read the right value from the MPC table and
@ -2175,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
*/
apic_printk(APIC_VERBOSE, KERN_INFO
"...changing IO-APIC physical APIC ID to %d ...",
mp_ioapics[apic].mp_apicid);
mp_ioapics[apic].apicid);
reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
reg_00.bits.ID = mp_ioapics[apic].apicid;
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0, reg_00.raw);
spin_unlock_irqrestore(&ioapic_lock, flags);
@ -2188,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
if (reg_00.bits.ID != mp_ioapics[apic].apicid)
printk("could not set ID!\n");
else
apic_printk(APIC_VERBOSE, " ok.\n");
@ -2383,7 +2382,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
cpumask_copy(&desc->affinity, mask);
cpumask_copy(desc->affinity, mask);
}
static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@ -2405,11 +2404,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
}
/* everthing is clear. we have right of way */
migrate_ioapic_irq_desc(desc, &desc->pending_mask);
migrate_ioapic_irq_desc(desc, desc->pending_mask);
ret = 0;
desc->status &= ~IRQ_MOVE_PENDING;
cpumask_clear(&desc->pending_mask);
cpumask_clear(desc->pending_mask);
unmask:
unmask_IO_APIC_irq_desc(desc);
@ -2434,7 +2433,7 @@ static void ir_irq_migration(struct work_struct *work)
continue;
}
desc->chip->set_affinity(irq, &desc->pending_mask);
desc->chip->set_affinity(irq, desc->pending_mask);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
@ -2448,7 +2447,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{
if (desc->status & IRQ_LEVEL) {
desc->status |= IRQ_MOVE_PENDING;
cpumask_copy(&desc->pending_mask, mask);
cpumask_copy(desc->pending_mask, mask);
migrate_irq_remapped_level_desc(desc);
return;
}
@ -2516,7 +2515,7 @@ static void irq_complete_move(struct irq_desc **descp)
/* domain has not changed, but affinity did */
me = smp_processor_id();
if (cpu_isset(me, desc->affinity)) {
if (cpumask_test_cpu(me, desc->affinity)) {
*descp = desc = move_irq_desc(desc, me);
/* get the new one */
cfg = desc->chip_data;
@ -3118,8 +3117,8 @@ static int ioapic_resume(struct sys_device *dev)
spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
}
spin_unlock_irqrestore(&ioapic_lock, flags);
@ -3184,7 +3183,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < NR_IRQS; new++) {
for (new = irq_want; new < nr_irqs; new++) {
if (platform_legacy_irq(new))
continue;
@ -3259,6 +3258,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
int err;
unsigned dest;
if (disable_apic)
return -ENXIO;
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (err)
@ -3727,6 +3729,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
struct irq_cfg *cfg;
int err;
if (disable_apic)
return -ENXIO;
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (!err) {
@ -3761,7 +3766,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
}
#endif /* CONFIG_HT_IRQ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_UV
/*
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
@ -3851,6 +3856,22 @@ void __init probe_nr_irqs_gsi(void)
nr_irqs_gsi = nr;
}
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
int nr;
nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
(NR_VECTORS + (8 * nr_cpu_ids)) :
(NR_VECTORS + (32 * nr_ioapics)));
if (nr < nr_irqs && nr > nr_irqs_gsi)
nr_irqs = nr;
return 0;
}
#endif
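
As a worked example, assuming NR_VECTORS is 256: on a box with nr_cpu_ids = 8 and a single I/O APIC, 8 * 8 = 64 exceeds 32 * 1 = 32, so nr = 256 + 64 = 320; nr_irqs is then lowered to 320 only if that value is still below the current nr_irqs and above nr_irqs_gsi.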
/* --------------------------------------------------------------------------
ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */
@ -3985,8 +4006,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
return -1;
for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].mp_irqtype == mp_INT &&
mp_irqs[i].mp_srcbusirq == bus_irq)
if (mp_irqs[i].irqtype == mp_INT &&
mp_irqs[i].srcbusirq == bus_irq)
break;
if (i >= mp_irq_entries)
return -1;
@ -4040,7 +4061,7 @@ void __init setup_ioapic_dest(void)
*/
if (desc->status &
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
mask = &desc->affinity;
mask = desc->affinity;
else
mask = TARGET_CPUS;
@ -4101,7 +4122,7 @@ void __init ioapic_init_mappings(void)
ioapic_res = ioapic_setup_resources();
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mp_apicaddr;
ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
if (!ioapic_phys) {
printk(KERN_ERR


@ -36,11 +36,7 @@ void ack_bad_irq(unsigned int irq)
#endif
}
#ifdef CONFIG_X86_32
# define irq_stats(x) (&per_cpu(irq_stat, x))
#else
# define irq_stats(x) cpu_pda(x)
#endif
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing:
*/


@ -248,7 +248,7 @@ void fixup_irqs(void)
if (irq == 2)
continue;
affinity = &desc->affinity;
affinity = desc->affinity;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
affinity = cpu_all_mask;


@ -18,6 +18,13 @@
#include <linux/smp.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/apic.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
/*
* Probabilistic stack overflow check:
@ -100,7 +107,7 @@ void fixup_irqs(void)
/* interrupt's are disabled at this point */
spin_lock(&desc->lock);
affinity = &desc->affinity;
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock);


@ -140,8 +140,15 @@ void __init native_init_IRQ(void)
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPI for invalidation */
alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
/* IPIs for invalidation */
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);


@ -87,9 +87,9 @@
#include <linux/cpu.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/microcode.h>
@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
}
static inline int
static inline int
update_match_revision(struct microcode_header_intel *mc_header, int rev)
{
return (mc_header->rev <= rev) ? 0 : 1;
@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
return ret;
}
ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
&get_ucode_fw);
ret = generic_load_microcode(cpu, (void *)firmware->data,
firmware->size, &get_ucode_fw);
release_firmware(firmware);
@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
/* We should bind the task to the CPU */
BUG_ON(cpu != raw_smp_processor_id());
return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
static void microcode_fini_cpu(int cpu)


@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
table entries. */
}
/* We don't need anything special. */
@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
*para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".text", secstrings + s->sh_name))
text = s;
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s;
locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}


@ -30,14 +30,14 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#define DEBUGP(fmt...)
#define DEBUGP(fmt...)
#ifndef CONFIG_UML
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
table entries. */
}
void *module_alloc(unsigned long size)
@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
void *loc;
u64 val;
u64 val;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)loc);
DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)loc);
val = sym->st_value + rel[i].r_addend;
val = sym->st_value + rel[i].r_addend;
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_X86_64_NONE:
@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
case R_X86_64_PC32:
case R_X86_64_PC32:
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
if ((s64)val != *(s32 *)loc)
goto overflow;
goto overflow;
#endif
break;
default:
printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
overflow:
printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
me->name);
@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
unsigned int relsec,
struct module *me)
{
printk("non add relocation not supported\n");
printk(KERN_ERR "non add relocation not supported\n");
return -ENOSYS;
}
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL;
@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s;
locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}


@ -144,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
if (bad_ioapic(m->apicaddr))
return;
mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
mp_ioapics[nr_ioapics].mp_type = m->type;
mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
mp_ioapics[nr_ioapics].mp_flags = m->flags;
mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
mp_ioapics[nr_ioapics].apicid = m->apicid;
mp_ioapics[nr_ioapics].type = m->type;
mp_ioapics[nr_ioapics].apicver = m->apicver;
mp_ioapics[nr_ioapics].flags = m->flags;
nr_ioapics++;
}
@ -160,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
m->srcbusirq, m->dstapic, m->dstirq);
}
static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
mp_irq->irqtype, mp_irq->irqflag & 3,
(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}
static void __init assign_to_mp_irq(struct mpc_intsrc *m,
struct mp_config_intsrc *mp_irq)
struct mpc_intsrc *mp_irq)
{
mp_irq->mp_dstapic = m->dstapic;
mp_irq->mp_type = m->type;
mp_irq->mp_irqtype = m->irqtype;
mp_irq->mp_irqflag = m->irqflag;
mp_irq->mp_srcbus = m->srcbus;
mp_irq->mp_srcbusirq = m->srcbusirq;
mp_irq->mp_dstirq = m->dstirq;
mp_irq->dstapic = m->dstapic;
mp_irq->type = m->type;
mp_irq->irqtype = m->irqtype;
mp_irq->irqflag = m->irqflag;
mp_irq->srcbus = m->srcbus;
mp_irq->srcbusirq = m->srcbusirq;
mp_irq->dstirq = m->dstirq;
}
static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m)
{
m->dstapic = mp_irq->mp_dstapic;
m->type = mp_irq->mp_type;
m->irqtype = mp_irq->mp_irqtype;
m->irqflag = mp_irq->mp_irqflag;
m->srcbus = mp_irq->mp_srcbus;
m->srcbusirq = mp_irq->mp_srcbusirq;
m->dstirq = mp_irq->mp_dstirq;
m->dstapic = mp_irq->dstapic;
m->type = mp_irq->type;
m->irqtype = mp_irq->irqtype;
m->irqflag = mp_irq->irqflag;
m->srcbus = mp_irq->srcbus;
m->srcbusirq = mp_irq->srcbusirq;
m->dstirq = mp_irq->dstirq;
}
static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m)
{
if (mp_irq->mp_dstapic != m->dstapic)
if (mp_irq->dstapic != m->dstapic)
return 1;
if (mp_irq->mp_type != m->type)
if (mp_irq->type != m->type)
return 2;
if (mp_irq->mp_irqtype != m->irqtype)
if (mp_irq->irqtype != m->irqtype)
return 3;
if (mp_irq->mp_irqflag != m->irqflag)
if (mp_irq->irqflag != m->irqflag)
return 4;
if (mp_irq->mp_srcbus != m->srcbus)
if (mp_irq->srcbus != m->srcbus)
return 5;
if (mp_irq->mp_srcbusirq != m->srcbusirq)
if (mp_irq->srcbusirq != m->srcbusirq)
return 6;
if (mp_irq->mp_dstirq != m->dstirq)
if (mp_irq->dstirq != m->dstirq)
return 7;
return 0;
@ -417,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
intsrc.type = MP_INTSRC;
intsrc.irqflag = 0; /* conforming */
intsrc.srcbus = 0;
intsrc.dstapic = mp_ioapics[0].mp_apicid;
intsrc.dstapic = mp_ioapics[0].apicid;
intsrc.irqtype = mp_INT;
@ -570,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
}
}
static struct intel_mp_floating *mpf_found;
static struct mpf_intel *mpf_found;
/*
* Scan the memory blocks for an SMP configuration block.
*/
static void __init __get_smp_config(unsigned int early)
{
struct intel_mp_floating *mpf = mpf_found;
struct mpf_intel *mpf = mpf_found;
if (!mpf)
return;
@ -598,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
}
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
mpf->mpf_specification);
mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
if (mpf->mpf_feature2 & (1 << 7)) {
if (mpf->feature2 & (1 << 7)) {
printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
pic_mode = 1;
} else {
@ -611,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
/*
* Now see if we need to read further.
*/
if (mpf->mpf_feature1 != 0) {
if (mpf->feature1 != 0) {
if (early) {
/*
* local APIC has default address
@ -621,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
}
printk(KERN_INFO "Default MP configuration #%d\n",
mpf->mpf_feature1);
construct_default_ISA_mptable(mpf->mpf_feature1);
mpf->feature1);
construct_default_ISA_mptable(mpf->feature1);
} else if (mpf->mpf_physptr) {
} else if (mpf->physptr) {
/*
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0;
#endif
@ -688,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned reserve)
{
unsigned int *bp = phys_to_virt(base);
struct intel_mp_floating *mpf;
struct mpf_intel *mpf;
apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
bp, length);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
mpf = (struct intel_mp_floating *)bp;
mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) &&
(mpf->mpf_length == 1) &&
(mpf->length == 1) &&
!mpf_checksum((unsigned char *)bp, 16) &&
((mpf->mpf_specification == 1)
|| (mpf->mpf_specification == 4))) {
((mpf->specification == 1)
|| (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
#endif
@ -713,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
return 1;
reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
BOOTMEM_DEFAULT);
if (mpf->mpf_physptr) {
if (mpf->physptr) {
unsigned long size = PAGE_SIZE;
#ifdef CONFIG_X86_32
/*
@ -722,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
* the bottom is mapped now.
* PC-9800's MPC table places on the very last
* of physical memory; so that simply reserving
* PAGE_SIZE from mpg->mpf_physptr yields BUG()
* PAGE_SIZE from mpf->physptr yields BUG()
* in reserve_bootmem.
*/
unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->mpf_physptr + size > end)
size = end - mpf->mpf_physptr;
if (mpf->physptr + size > end)
size = end - mpf->physptr;
#endif
reserve_bootmem_generic(mpf->mpf_physptr, size,
reserve_bootmem_generic(mpf->physptr, size,
BOOTMEM_DEFAULT);
}
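
The renamed structure follows the 16-byte MP Floating Pointer layout from the Intel MP specification; a sketch of it, and of the byte-sum rule the !mpf_checksum(...) test relies on (the helper below is an assumed re-implementation, not copied from mpparse.c):

/* Sketch of the 16-byte floating pointer structure. */
struct mpf_intel {
        char            signature[4];   /* "_MP_", i.e. SMP_MAGIC_IDENT */
        unsigned int    physptr;        /* physical address of the MP config table */
        unsigned char   length;         /* 1, in 16-byte units */
        unsigned char   specification;  /* MP spec revision, 1 or 4 */
        unsigned char   checksum;       /* all 16 bytes must sum to 0 mod 256 */
        unsigned char   feature1;       /* default config type, 0 if MP table present */
        unsigned char   feature2;       /* bit 7: IMCR/PIC compatibility mode */
        unsigned char   feature3;
        unsigned char   feature4;
        unsigned char   feature5;
};

/* Assumed shape of the checksum helper: a zero return means valid. */
static int mpf_checksum_sketch(unsigned char *mp, int len)
{
        int sum = 0;

        while (len--)
                sum += *mp++;
        return sum & 0xFF;
}

This also explains the update path at the end of the file: after physptr is rewritten, checksum is recomputed so the 16 bytes again sum to zero.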
@ -809,15 +809,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
/* not legacy */
for (i = 0; i < mp_irq_entries; i++) {
if (mp_irqs[i].mp_irqtype != mp_INT)
if (mp_irqs[i].irqtype != mp_INT)
continue;
if (mp_irqs[i].mp_irqflag != 0x0f)
if (mp_irqs[i].irqflag != 0x0f)
continue;
if (mp_irqs[i].mp_srcbus != m->srcbus)
if (mp_irqs[i].srcbus != m->srcbus)
continue;
if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
if (mp_irqs[i].srcbusirq != m->srcbusirq)
continue;
if (irq_used[i]) {
/* already claimed */
@ -922,10 +922,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (irq_used[i])
continue;
if (mp_irqs[i].mp_irqtype != mp_INT)
if (mp_irqs[i].irqtype != mp_INT)
continue;
if (mp_irqs[i].mp_irqflag != 0x0f)
if (mp_irqs[i].irqflag != 0x0f)
continue;
if (nr_m_spare > 0) {
@ -1001,7 +1001,7 @@ static int __init update_mp_table(void)
{
char str[16];
char oem[10];
struct intel_mp_floating *mpf;
struct mpf_intel *mpf;
struct mpc_table *mpc, *mpc_new;
if (!enable_update_mptable)
@ -1014,19 +1014,19 @@ static int __init update_mp_table(void)
/*
* Now see if we need to go further.
*/
if (mpf->mpf_feature1 != 0)
if (mpf->feature1 != 0)
return 0;
if (!mpf->mpf_physptr)
if (!mpf->physptr)
return 0;
mpc = phys_to_virt(mpf->mpf_physptr);
mpc = phys_to_virt(mpf->physptr);
if (!smp_check_mpc(mpc, oem, str))
return 0;
printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
printk(KERN_INFO "physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) {
mpc_new_phys = 0;
@ -1047,23 +1047,23 @@ static int __init update_mp_table(void)
}
printk(KERN_INFO "use in-positon replacing\n");
} else {
mpf->mpf_physptr = mpc_new_phys;
mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys);
memcpy(mpc_new, mpc, mpc->length);
mpc = mpc_new;
/* check if we can modify that */
if (mpc_new_phys - mpf->mpf_physptr) {
struct intel_mp_floating *mpf_new;
if (mpc_new_phys - mpf->physptr) {
struct mpf_intel *mpf_new;
/* steal 16 bytes from [0, 1k) */
printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
mpf_new = phys_to_virt(0x400 - 16);
memcpy(mpf_new, mpf, 16);
mpf = mpf_new;
mpf->mpf_physptr = mpc_new_phys;
mpf->physptr = mpc_new_phys;
}
mpf->mpf_checksum = 0;
mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
mpf->checksum = 0;
mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
}
/*

Some files were not shown because too many files have changed in this diff.