Staging: batman-adv: Don't remove interface with spinlock held
We call a lot of the netdevice code when holding if_list_lock, which will spin the whole time. This is not necessary because we only want the access to the list to be serialized. An extra queue can be used which holds all interfaces that should be removed; that queue can then be processed without any locks held for the netdevice cleanup. Calling different netdevice-related functions inside a spinlock-protected area creates a "scheduling while atomic" Oops on a preemptible kernel. Reported-by: Rafal Lesniak <lesniak@eresi-project.org> Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Родитель
6df78338e7
Коммит
9ee898739b
|
@ -463,9 +463,6 @@ static void hardif_remove_interface(struct batman_if *batman_if)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
batman_if->if_status = IF_TO_BE_REMOVED;
|
batman_if->if_status = IF_TO_BE_REMOVED;
|
||||||
|
|
||||||
/* caller must take if_list_lock */
|
|
||||||
list_del_rcu(&batman_if->list);
|
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
sysfs_del_hardif(&batman_if->hardif_obj);
|
sysfs_del_hardif(&batman_if->hardif_obj);
|
||||||
hardif_put(batman_if);
|
hardif_put(batman_if);
|
||||||
|
@ -474,13 +471,21 @@ static void hardif_remove_interface(struct batman_if *batman_if)
|
||||||
void hardif_remove_interfaces(void)
|
void hardif_remove_interfaces(void)
|
||||||
{
|
{
|
||||||
struct batman_if *batman_if, *batman_if_tmp;
|
struct batman_if *batman_if, *batman_if_tmp;
|
||||||
|
struct list_head if_queue;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&if_queue);
|
||||||
|
|
||||||
rtnl_lock();
|
|
||||||
spin_lock(&if_list_lock);
|
spin_lock(&if_list_lock);
|
||||||
list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
|
list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
|
||||||
hardif_remove_interface(batman_if);
|
list_del_rcu(&batman_if->list);
|
||||||
|
list_add_tail(&batman_if->list, &if_queue);
|
||||||
}
|
}
|
||||||
spin_unlock(&if_list_lock);
|
spin_unlock(&if_list_lock);
|
||||||
|
|
||||||
|
rtnl_lock();
|
||||||
|
list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
|
||||||
|
hardif_remove_interface(batman_if);
|
||||||
|
}
|
||||||
rtnl_unlock();
|
rtnl_unlock();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -507,8 +512,10 @@ static int hard_if_event(struct notifier_block *this,
|
||||||
break;
|
break;
|
||||||
case NETDEV_UNREGISTER:
|
case NETDEV_UNREGISTER:
|
||||||
spin_lock(&if_list_lock);
|
spin_lock(&if_list_lock);
|
||||||
hardif_remove_interface(batman_if);
|
list_del_rcu(&batman_if->list);
|
||||||
spin_unlock(&if_list_lock);
|
spin_unlock(&if_list_lock);
|
||||||
|
|
||||||
|
hardif_remove_interface(batman_if);
|
||||||
break;
|
break;
|
||||||
case NETDEV_CHANGEMTU:
|
case NETDEV_CHANGEMTU:
|
||||||
if (batman_if->soft_iface)
|
if (batman_if->soft_iface)
|
||||||
|
|
Загрузка…
Ссылка в новой задаче