x86, asmlinkage: Make several variables used from assembler/linker script visible
Plus one function, load_gs_index().

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1375740170-7446-10-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Parent: 04bb591ca7
Commit: 277d5b40b7
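For background: under whole-program optimization (gcc LTO, or aggressive unit-at-a-time analysis) the compiler may discard or internalize symbols that no C code appears to reference, even though assembler files or the linker script use them. The kernel's __visible annotation marks such symbols as externally visible. A minimal sketch of the idea, assuming the macro matches include/linux/compiler-gcc4.h of this era (gcc >= 4.6); the variable name is a hypothetical example, not from this patch:

/* Assumed expansion of __visible for gcc >= 4.6. */
#define __visible __attribute__((externally_visible))

/* A symbol touched only from a hand-written .S file: the C compiler
 * sees no users, so without the attribute LTO could discard it. */
__visible unsigned long early_boot_flag;	/* hypothetical name */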
arch/x86/include/asm/pgtable.h
@@ -22,7 +22,8 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+	__visible;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
arch/x86/include/asm/special_insns.h
@@ -101,7 +101,7 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-extern void native_load_gs_index(unsigned);
+extern asmlinkage void native_load_gs_index(unsigned);
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
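native_load_gs_index() is implemented in assembly, so its C declaration gains asmlinkage to pin down the calling convention the asm code expects. A rough sketch of what asmlinkage does, assuming it mirrors include/linux/linkage.h and arch/x86/include/asm/linkage.h of this era (CPP_ASMLINKAGE is extern "C" under C++, empty otherwise):

#ifdef CONFIG_X86_32
/* Force all arguments onto the stack so the assembly implementation
 * need not know about gcc's regparm register convention. */
# define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
#else
/* 64-bit: the standard SysV ABI already matches what the asm expects. */
# define asmlinkage CPP_ASMLINKAGE
#endif

Elsewhere in this series asmlinkage also picks up __visible, for the same keep-the-symbol-alive-under-LTO reason as the rest of this patch.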
arch/x86/kernel/cpu/amd.c
@@ -66,8 +66,8 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  * performance at the same time..
  */
 
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
+extern __visible void vide(void);
+__asm__(".globl vide\n\t.align 4\nvide: ret");
 
 static void init_amd_k5(struct cpuinfo_x86 *c)
 {
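vide() is a one-instruction workaround routine whose body lives entirely in top-level inline asm. Besides marking the declaration __visible, the label itself now gets .globl: once LTO partitions translation units, the extern reference can only bind if the asm label is a global symbol. A standalone sketch of the pattern, not kernel code (assumes gcc on x86; build with e.g. gcc -O2 demo.c):

/* A C-callable no-op whose body is provided by top-level asm.
 * ".globl" exports the label; without it the extern declaration
 * may fail to resolve under -flto partitioning. */
__attribute__((externally_visible)) void vide(void);
__asm__(".globl vide\n\t.align 4\nvide:\n\tret");

int main(void)
{
	vide();		/* executes the single "ret" */
	return 0;
}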
arch/x86/kernel/cpu/common.c
@@ -1076,7 +1076,7 @@ struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
 				    (unsigned long) debug_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
-		     irq_stack_union) __aligned(PAGE_SIZE);
+		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
 
 /*
  * The following four percpu variables are hot. Align current_task to
arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
arch/x86/kernel/process.c
@@ -36,7 +36,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-DEFINE_PER_CPU(unsigned long, old_rsp);
+asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
arch/x86/kernel/setup.c
@@ -206,9 +206,9 @@ EXPORT_SYMBOL(boot_cpu_data);
 
 
 #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-unsigned long mmu_cr4_features;
+__visible unsigned long mmu_cr4_features;
 #else
-unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
 #endif
 
 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
arch/x86/kernel/signal.c
@@ -728,7 +728,7 @@ static void do_signal(struct pt_regs *regs)
  * notification of userspace execution resumption
  * - triggered by the TIF_WORK_MASK flags
  */
-void
+__visible void
 do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
 	user_exit();
arch/x86/lib/usercopy_64.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
  * Since protection fault in copy_from/to_user is not a normal situation,
  * it is not necessary to optimize tail handling.
  */
-unsigned long
+__visible unsigned long
 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
 {
 	char c;