[PATCH] cpu hotplug: revert init patch submitted for 2.6.17
In 2.6.17, there was a problem with cpu_notifiers and XFS. I provided a band-aid solution to solve that problem. In the process, I undid all the changes you both were making to ensure that these notifiers were available only at init time (unless CONFIG_HOTPLUG_CPU is defined).

We deferred the real fix to 2.6.18. Here is a set of patches that fixes the XFS problem cleanly and makes the cpu notifiers available only at init time (unless CONFIG_HOTPLUG_CPU is defined). If CONFIG_HOTPLUG_CPU is defined, cpu notifiers are available at run time.

This patch reverts the notifier_call changes made in 2.6.17.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent: 6ac12dfe9c
Commit: 9c7b216d23
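For illustration only (not part of this patch), the pattern these annotations support looks roughly like the sketch below. The callback, notifier block, and init function names are hypothetical. With CONFIG_HOTPLUG_CPU the notifier remains useful at run time; without it, the callback is only needed while CPUs are brought up during boot, which is what lets the __cpuinit/__devinit-marked text be discarded after init.

	#include <linux/init.h>
	#include <linux/cpu.h>
	#include <linux/notifier.h>

	/* Hypothetical callback, annotated __cpuinit so it may be discarded
	 * after boot when CONFIG_HOTPLUG_CPU is not set. */
	static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
						  unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_ONLINE:
			/* set up per-cpu state for the newly onlined cpu */
			break;
		case CPU_DEAD:
			/* tear down per-cpu state (only reachable with hotplug) */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_nb = {
		.notifier_call = example_cpu_callback,
	};

	static int __init example_init(void)
	{
		/* register during boot; stays live only if hotplug is enabled */
		register_cpu_notifier(&example_cpu_nb);
		return 0;
	}
	__initcall(example_init);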
@@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	return;
 }
 
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __devinit palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action,
 					void *hcpu)
 {
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int i, cpu = (unsigned long)hcpu;
@@ -404,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int sysfs_cpu_notify(struct notifier_block *self,
+static int __devinit sysfs_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
 #endif
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
+static __cpuinit int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
 	return 0;
 }
 
-static int topology_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -1497,7 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int hrtimer_cpu_notify(struct notifier_block *self,
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -299,7 +299,7 @@ out:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int profile_cpu_callback(struct notifier_block *info,
+static int __devinit profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
@@ -548,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
 	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
 }
 
-static int rcu_cpu_notify(struct notifier_block *self,
+static int __devinit rcu_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
 /*
  * Create/destroy watchdog threads as CPUs come and go:
  */
-static int
+static int __devinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
@@ -1652,7 +1652,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
@@ -1073,7 +1073,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-static int cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1450,7 +1450,7 @@ out:
    not required for correctness. So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;