net: Reflect all pernet_operations are converted

All pernet_operations are reviewed and converted, hooray!
Reflect this in core code: setup_net() and cleanup_net()
will take down_read() always.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Kirill Tkhai 2018-03-27 18:02:01 +03:00 committed by David S. Miller
Parent 67441c2472
Commit 094374e5e1
1 changed file with 6 additions and 37 deletions

View file

@ -40,9 +40,8 @@ struct net init_net = {
EXPORT_SYMBOL(init_net); EXPORT_SYMBOL(init_net);
static bool init_net_initialized; static bool init_net_initialized;
static unsigned nr_sync_pernet_ops;
/* /*
* net_sem: protects: pernet_list, net_generic_ids, nr_sync_pernet_ops, * net_sem: protects: pernet_list, net_generic_ids,
* init_net_initialized and first_device pointer. * init_net_initialized and first_device pointer.
*/ */
DECLARE_RWSEM(net_sem); DECLARE_RWSEM(net_sem);
@ -406,7 +405,6 @@ struct net *copy_net_ns(unsigned long flags,
{ {
struct ucounts *ucounts; struct ucounts *ucounts;
struct net *net; struct net *net;
unsigned write;
int rv; int rv;
if (!(flags & CLONE_NEWNET)) if (!(flags & CLONE_NEWNET))
@ -424,25 +422,14 @@ struct net *copy_net_ns(unsigned long flags,
refcount_set(&net->passive, 1); refcount_set(&net->passive, 1);
net->ucounts = ucounts; net->ucounts = ucounts;
get_user_ns(user_ns); get_user_ns(user_ns);
again:
write = READ_ONCE(nr_sync_pernet_ops); rv = down_read_killable(&net_sem);
if (write)
rv = down_write_killable(&net_sem);
else
rv = down_read_killable(&net_sem);
if (rv < 0) if (rv < 0)
goto put_userns; goto put_userns;
if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
up_read(&net_sem);
goto again;
}
rv = setup_net(net, user_ns); rv = setup_net(net, user_ns);
if (write) up_read(&net_sem);
up_write(&net_sem);
else
up_read(&net_sem);
if (rv < 0) { if (rv < 0) {
put_userns: put_userns:
@ -490,21 +477,11 @@ static void cleanup_net(struct work_struct *work)
struct net *net, *tmp, *last; struct net *net, *tmp, *last;
struct llist_node *net_kill_list; struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list); LIST_HEAD(net_exit_list);
unsigned write;
/* Atomically snapshot the list of namespaces to cleanup */ /* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list); net_kill_list = llist_del_all(&cleanup_list);
again:
write = READ_ONCE(nr_sync_pernet_ops);
if (write)
down_write(&net_sem);
else
down_read(&net_sem);
if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) { down_read(&net_sem);
up_read(&net_sem);
goto again;
}
/* Don't let anyone else find us. */ /* Don't let anyone else find us. */
rtnl_lock(); rtnl_lock();
@ -543,10 +520,7 @@ again:
list_for_each_entry_reverse(ops, &pernet_list, list) list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list); ops_free_list(ops, &net_exit_list);
if (write) up_read(&net_sem);
up_write(&net_sem);
else
up_read(&net_sem);
/* Ensure there are no outstanding rcu callbacks using this /* Ensure there are no outstanding rcu callbacks using this
* network namespace. * network namespace.
@ -1006,9 +980,6 @@ again:
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)
ida_remove(&net_generic_ids, *ops->id); ida_remove(&net_generic_ids, *ops->id);
} else if (!ops->async) {
pr_info_once("Pernet operations %ps are sync.\n", ops);
nr_sync_pernet_ops++;
} }
return error; return error;
@ -1016,8 +987,6 @@ again:
static void unregister_pernet_operations(struct pernet_operations *ops) static void unregister_pernet_operations(struct pernet_operations *ops)
{ {
if (!ops->async)
BUG_ON(nr_sync_pernet_ops-- == 0);
__unregister_pernet_operations(ops); __unregister_pernet_operations(ops);
rcu_barrier(); rcu_barrier();
if (ops->id) if (ops->id)