[PATCH] i386: __devinit should be __cpuinit
Several places in arch/i386/kernel/cpu and kernel/cpu were using __devinit
when they should have been __cpuinit.  Fixing that saves ~4K when
CONFIG_HOTPLUG && !CONFIG_HOTPLUG_CPU.

Noticed by Andrew Morton.

Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent: 9a0b5817ad
Commit: 3bc9b76bed
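For context on where the ~4K comes from: the section annotations in include/linux/init.h make __devinit collapse to __init (so the code is freed after boot) only when CONFIG_HOTPLUG is disabled, while __cpuinit collapses to __init whenever CONFIG_HOTPLUG_CPU is disabled. The following is a simplified sketch of those era definitions, not a verbatim copy of the header (the real file also defines __devexit/__cpuexit and further variants):

/* Simplified sketch of the include/linux/init.h conventions this patch
 * relies on (illustrative; not the full header).
 */
#ifdef CONFIG_HOTPLUG
#define __devinit			/* kept resident: devices may hotplug later */
#define __devinitdata
#else
#define __devinit	__init		/* discarded after boot */
#define __devinitdata	__initdata
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit			/* kept resident: CPUs may hotplug later */
#define __cpuinitdata
#else
#define __cpuinit	__init		/* discarded after boot */
#define __cpuinitdata	__initdata
#endif

/* With CONFIG_HOTPLUG=y and CONFIG_HOTPLUG_CPU=n, a __devinit function stays
 * in memory forever, while the same function marked __cpuinit is freed along
 * with the other init sections -- hence the savings from this patch. */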
@@ -25,9 +25,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
-static int cachesize_override __devinitdata = -1;
-static int disable_x86_fxsr __devinitdata = 0;
-static int disable_x86_serial_nr __devinitdata = 1;
+static int cachesize_override __cpuinitdata = -1;
+static int disable_x86_fxsr __cpuinitdata = 0;
+static int disable_x86_serial_nr __cpuinitdata = 1;
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-int __devinit get_model_name(struct cpuinfo_x86 *c)
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -89,7 +89,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
@@ -130,7 +130,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 
 /* Look up CPU names by table lookup. */
-static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -210,7 +210,7 @@ static inline int flag_is_changeable_p(u32 flag)
 
 
 /* Probe for the CPUID instruction */
-static int __devinit have_cpuid_p(void)
+static int __cpuinit have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
@@ -254,7 +254,7 @@ static void __init early_cpu_detect(void)
 	}
 }
 
-void __devinit generic_identify(struct cpuinfo_x86 * c)
+void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
 	int junk;
@@ -307,7 +307,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
 #endif
 }
 
-static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
 		/* Disable processor serial number */
@@ -335,7 +335,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-void __devinit identify_cpu(struct cpuinfo_x86 *c)
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -453,7 +453,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_HT
-void __devinit detect_ht(struct cpuinfo_x86 *c)
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
@@ -500,7 +500,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
 }
 #endif
 
-void __devinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	char *vendor = NULL;
 
@@ -523,7 +523,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
 	printk("\n");
 }
 
-cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -570,7 +570,7 @@ void __init early_cpu_init(void)
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __devinit cpu_init(void)
+void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -670,7 +670,7 @@ void __devinit cpu_init(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void __devinit cpu_uninit(void)
+void __cpuinit cpu_uninit(void)
 {
 	int cpu = raw_smp_processor_id();
 	cpu_clear(cpu, cpu_initialized);
@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
+void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return;
@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
  * This is called before we do cpu ident work
  */
 
-int __devinit ppro_with_ram_bug(void)
+int __cpuinit ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
  * P4 Xeon errata 037 workaround.
  * Hardware prefetcher may cause stale data to be loaded into the cache.
  */
-static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 /*
  * find out the number of processor cores on the die
  */
-static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 		return 1;
 }
 
-static void __devinit init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 	char *p = NULL;
@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev intel_cpu_dev __devinitdata = {
+static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor = "Intel",
 	.c_ident = { "GenuineIntel" },
 	.c_models = {
@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		}
 	}
 }
-static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	int sibling;