diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c
index b2e462abf337..c58d0c14f274 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -36,7 +36,7 @@ static void timer_stop(void)
 {
 	enable_timer_nmi_watchdog();
 	unset_nmi_callback();
-	synchronize_kernel();
+	synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
index 9802beefa217..f8f19637f73f 100644
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ b/arch/ppc64/kernel/HvLpEvent.c
@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
 			/* We now sleep until all other CPUs have scheduled. This ensures that
 			 * the deletion is seen by all other CPUs, and that the deleted handler
 			 * isn't still running on another CPU when we return. */
-			synchronize_kernel();
+			synchronize_rcu();
 		}
 	}
 	return rc;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 05a17812d521..ff64d333e95f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
 	/* Fall back to the default idle loop */
 	pm_idle = pm_idle_save;
-	synchronize_kernel();
+	synchronize_sched();  /* Relies on interrupts forcing exit from idle. */
 	pr->flags.power = 0;
 	result = acpi_processor_get_power_info(pr);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 29de259a981e..44a7f13c788b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 	/* Wait until we know that we are out of any interrupt
 	   handlers might have been running before we freed the
 	   interrupt. */
-	synchronize_kernel();
+	synchronize_sched();
 	if (new_smi->si_sm) {
 		if (new_smi->handlers)
@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 	/* Wait until we know that we are out of any interrupt
 	   handlers might have been running before we freed the
 	   interrupt. */
-	synchronize_kernel();
+	synchronize_sched();
 	/* Wait for the timer to stop.  This avoids problems with race
 	   conditions removing the timer here. */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index f7304f0ce542..ff66ed4ee2cd 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio)
 	atkbd_disable(atkbd);
 	/* make sure we don't have a command in flight */
-	synchronize_kernel();
+	synchronize_sched();  /* Allow atkbd_interrupt()s to complete. */
 	flush_scheduled_work();
 	device_remove_file(&serio->dev, &atkbd_attr_extra);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c9b134cd1532..1891e4930dcc 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a389394b52f6..83380b5d6593 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b100bfe4fdca..e9dc2876a626 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 52c3a81c4aa7..e96e2a10a9c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 7e30ab29691a..8a33f351e092 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
 			goto abort;
 		}
 		p->rdev = NULL;
-		synchronize_kernel();
+		synchronize_rcu();
 		if (atomic_read(&rdev->nr_pending)) {
 			/* lost the race, try later */
 			err = -EBUSY;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 07e2df09491f..c59507f8a76b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2385,7 +2385,7 @@ core_down:
 	}
 	/* Give a racing hard_start_xmit a few cycles to complete. */
-	synchronize_kernel();
+	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
 	/*
 	 * And now for the 50k$ question: are IRQ disabled or not ?
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 3720e77b465f..83e6a060668e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler)
 	else
 		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
 	if (!ret)
-		synchronize_kernel();
+		synchronize_sched();  /* Allow interrupts to complete. */
 	sprintf (dbf_txt, "ret:%d", ret);
 	CIO_TRACE_EVENT (4, dbf_txt);
@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
 		ret = -EINVAL;
 	else {
 		adapter_handler = NULL;
-		synchronize_kernel();
+		synchronize_sched();  /* Allow interrupts to complete. */
 		ret = 0;
 	}
 	sprintf (dbf_txt, "ret:%d", ret);
diff --git a/kernel/module.c b/kernel/module.c
index 2dbfa0773faf..5734ab09d3f9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
 		/* Init routine failed: abort.  Try to protect us from
 		   buggy refcounters. */
 		mod->state = MODULE_STATE_GOING;
-		synchronize_kernel();
+		synchronize_sched();
 		if (mod->unsafe)
 			printk(KERN_ERR "%s: module is now stuck!\n",
 			       mod->name);
diff --git a/kernel/profile.c b/kernel/profile.c
index a66be468c422..0221a50ca867 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
 	WARN_ON(hook != timer_hook);
 	timer_hook = NULL;
 	/* make sure all CPUs see the NULL hook */
-	synchronize_kernel();
+	synchronize_sched();  /* Allow ongoing interrupts to complete. */
 }
 EXPORT_SYMBOL_GPL(register_timer_hook);
diff --git a/mm/slab.c b/mm/slab.c
index 771cc09f9f1a..840742641152 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 	}
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		synchronize_kernel();
+		synchronize_rcu();
 	/* no cpu_online check required here since we clear the percpu
 	 * array on cpu offline and set this to NULL.
diff --git a/net/core/dev.c b/net/core/dev.c
index 7bd4cd4502c4..f5f005846fe1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev)
 void synchronize_net(void)
 {
 	might_sleep();
-	synchronize_kernel();
+	synchronize_rcu();
 }
 /**
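
Note (not part of the patch): the md hunks above all use the same writer-side RCU idiom — unpublish the pointer, wait out a grace period with synchronize_rcu(), then check whether a reader pinned the object in the meantime. The sketch below illustrates that idiom with hypothetical names (struct my_dev, slot, reader, try_remove); it is kernel-style illustration only, not code from any file touched here. The synchronize_sched() call sites, by contrast, wait for code running with preemption or interrupts disabled (NMI and interrupt handlers, the idle loop) rather than for rcu_read_lock() readers.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_dev {				/* hypothetical structure */
	atomic_t nr_pending;		/* readers currently using it */
};

static struct my_dev *slot;		/* hypothetical RCU-protected pointer */

static void reader(void)
{
	struct my_dev *d;

	rcu_read_lock();
	d = rcu_dereference(slot);
	if (d)
		atomic_inc(&d->nr_pending);	/* pin; dropped when the I/O completes */
	rcu_read_unlock();
}

static int try_remove(struct my_dev *d)
{
	slot = NULL;			/* unpublish: new readers see NULL */
	synchronize_rcu();		/* wait for readers already inside rcu_read_lock() */
	if (atomic_read(&d->nr_pending))
		return -EBUSY;		/* lost the race, try later */
	kfree(d);
	return 0;
}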