asmlinkage, x86: Add explicit __visible to arch/x86/*
As requested by Linus, add explicit __visible to the asmlinkage users. This marks all functions visible to the assembler.

Tree sweep for arch/x86/*.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1398984278-29319-3-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
Parent: 3adc1beacd
Commit: 2605fc216f
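For context: every function touched below is entered only from assembly, so a whole-program-optimizing compiler sees no C caller and may discard or localize the symbol. Below is a minimal, self-contained sketch of the mechanism, not kernel code; it assumes GCC, where __visible in kernels of this era expands to __attribute__((externally_visible)), and the handler name is hypothetical:

/* sketch.c - illustrates why asmlinkage functions need __visible. */

#define __visible __attribute__((externally_visible))

/* In the kernel, asmlinkage pins down the C calling convention for entry
 * code (regparm(0) on x86-32); stubbed out for this sketch. */
#define asmlinkage

/* Pretend the only caller is a stub in a .S file. Under -fwhole-program
 * or -flto the compiler sees no reference and would normally drop the
 * function; externally_visible declares that the symbol is referenced
 * from outside the compilation unit and must be emitted and kept. */
asmlinkage __visible void demo_interrupt_handler(void)
{
}

int main(void)
{
	return 0;
}

Building this with gcc -O2 -fwhole-program sketch.c should keep demo_interrupt_handler in the binary; drop __visible and nm no longer shows the symbol, which is exactly the breakage an assembly stub calling into C would hit.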
@@ -354,7 +354,7 @@ static void parse_elf(void *output)
 	free(phdrs);
 }
 
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 				  unsigned char *input_data,
 				  unsigned long input_len,
 				  unsigned char *output,
@@ -31,7 +31,7 @@ static char temp_stack[4096];
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
 	return acpi_enter_sleep_state(state);
 }
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 	cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
 
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
 	smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	__smp_thermal_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
 	mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
 	entering_irq();
 	__smp_threshold_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
 	reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
 	sanitize_boot_params(&boot_params);
 
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
 	}
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
 	ack_APIC_irq();
 	irq_enter();
@@ -357,7 +357,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
@@ -36,7 +36,7 @@ static int irq_routing_comply = 1;
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -56,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -64,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -280,7 +280,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
 {
 	/* Fault while not rebooting. We want the trace. */
 	BUG();
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag. Our "save_flags()" just returns that.
  */
-asmlinkage unsigned long lguest_save_fl(void)
+asmlinkage __visible unsigned long lguest_save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-asmlinkage void lguest_irq_disable(void)
+asmlinkage __visible void lguest_irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
|
@ -302,7 +302,7 @@ static struct {
|
|||
0x242 in div_Xsig.S
|
||||
*/
|
||||
|
||||
asmlinkage void FPU_exception(int n)
|
||||
asmlinkage __visible void FPU_exception(int n)
|
||||
{
|
||||
int i, int_type;
|
||||
|
||||
|
@@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb,
 
 /* Invalid arith operation on Valid registers */
 /* Returns < 0 if the exception is unmasked */
-asmlinkage int arith_invalid(int deststnr)
+asmlinkage __visible int arith_invalid(int deststnr)
 {
 
 	EXCEPTION(EX_Invalid);
@@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr)
 }
 
 /* Divide a finite number by zero */
-asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
 {
 	FPU_REG *dest = &st(deststnr);
 	int tag = TAG_Valid;
@@ -539,7 +539,7 @@ int set_precision_flag(int flags)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_up(void)
+asmlinkage __visible void set_precision_flag_up(void)
 {
 	if (control_word & CW_Precision)
 		partial_status |= (SW_Precision | SW_C1);	/* The masked response */
@@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void)
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_down(void)
+asmlinkage __visible void set_precision_flag_down(void)
 {
 	if (control_word & CW_Precision) {	/* The masked response */
 		partial_status &= ~SW_C1;
@@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void)
 		EXCEPTION(EX_Precision);
 }
 
-asmlinkage int denormal_operand(void)
+asmlinkage __visible int denormal_operand(void)
 {
 	if (control_word & CW_Denormal) {	/* The masked response */
 		partial_status |= SW_Denorm_Op;
@@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void)
 	}
 }
 
-asmlinkage int arith_overflow(FPU_REG *dest)
+asmlinkage __visible int arith_overflow(FPU_REG *dest)
 {
 	int tag = TAG_Valid;
 
@@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest)
 
 }
 
-asmlinkage int arith_underflow(FPU_REG *dest)
+asmlinkage __visible int arith_underflow(FPU_REG *dest)
 {
 	int tag = TAG_Valid;
 
@@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
 	return 0;
 }
 
-asmlinkage int xo1_do_sleep(u8 sleep_state)
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
 {
 	void *pgd_addr = __va(read_cr3());
 
@@ -23,7 +23,7 @@
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Defined in hibernate_asm_64.S */
-extern asmlinkage int restore_image(void);
+extern asmlinkage __visible int restore_image(void);
 
 /*
  * Address to jump to in the last phase of restore in order to get to the image
@@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void)
 }
 
 /* First C function to be called on Xen boot */
-asmlinkage void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(void)
 {
 	struct physdev_set_iopl set_iopl;
 	int rc;
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-asmlinkage void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here.  We need to
 	   make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 