Merge branch 'netns-Optimise-netns-ID-lookups'

Guillaume Nault says:

====================
netns: Optimise netns ID lookups

Netns ID lookups can be easily protected by RCU, rather than by holding
a spinlock.

Patch 1 prepares the code, patch 2 does the RCU conversion, and finally
patch 3 stops disabling BHs on updates (patch 2 makes that unnecessary).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-01-14 11:28:41 -08:00
Parents 621400366b 8d7e5dee97
Commit 169af34684
1 changed file with 38 additions and 55 deletions

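To make the locking scheme concrete before reading the diff, here is a minimal sketch of the pattern the series converts net/core/net_namespace.c to. None of this is code from the commit: the names (example_ids, example_lock, example_match, example_lookup, example_insert) are hypothetical stand-ins. Lookups walk the IDR under rcu_read_lock(); updates still serialize on a spinlock, and because no reader takes that lock from softirq context any more, the plain spin_lock() variant suffices.

        #include <linux/idr.h>
        #include <linux/rcupdate.h>
        #include <linux/spinlock.h>

        static DEFINE_IDR(example_ids);
        static DEFINE_SPINLOCK(example_lock);   /* serializes writers only */

        /* idr_for_each() callback: a nonzero return value stops the walk
         * and becomes idr_for_each()'s return value.
         */
        static int example_match(int id, void *entry, void *needle)
        {
                return entry == needle ? id : 0;
        }

        /* Reader: runs entirely under RCU, never contends with writers. */
        static int example_lookup(void *needle)
        {
                int id;

                rcu_read_lock();
                id = idr_for_each(&example_ids, example_match, needle);
                rcu_read_unlock();

                return id;      /* 0 means "not found": ids start at 1 below */
        }

        /* Writer: keeps the spinlock, but needs no _bh suffix once every
         * softirq-context caller goes through the RCU read side instead.
         */
        static int example_insert(void *entry)
        {
                int id;

                idr_preload(GFP_KERNEL);
                spin_lock(&example_lock);
                id = idr_alloc(&example_ids, entry, 1, 0, GFP_NOWAIT);
                spin_unlock(&example_lock);
                idr_preload_end();

                return id;      /* negative errno on failure */
        }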

@@ -211,16 +211,10 @@ static int net_eq_idr(int id, void *net, void *peer)
 	return 0;
 }
 
-/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
- * is set to true, thus the caller knows that the new id must be notified via
- * rtnl.
- */
-static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
+/* Must be called from RCU-critical section or with nsid_lock held */
+static int __peernet2id(const struct net *net, struct net *peer)
 {
 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
-	bool alloc_it = *alloc;
-
-	*alloc = false;
 
 	/* Magic value for id 0. */
 	if (id == NET_ID_ZERO)
@@ -228,23 +222,9 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 	if (id > 0)
 		return id;
 
-	if (alloc_it) {
-		id = alloc_netid(net, peer, -1);
-		*alloc = true;
-		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
-	}
-
 	return NETNSA_NSID_NOT_ASSIGNED;
 }
 
-/* should be called with nsid_lock held */
-static int __peernet2id(struct net *net, struct net *peer)
-{
-	bool no = false;
-
-	return __peernet2id_alloc(net, peer, &no);
-}
-
 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
 			      struct nlmsghdr *nlh, gfp_t gfp);
 /* This function returns the id of a peer netns. If no id is assigned, one will
@@ -252,26 +232,37 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
  */
 int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 {
-	bool alloc = false, alive = false;
 	int id;
 
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
-	spin_lock_bh(&net->nsid_lock);
-	/*
-	 * When peer is obtained from RCU lists, we may race with
+
+	spin_lock(&net->nsid_lock);
+	id = __peernet2id(net, peer);
+	if (id >= 0) {
+		spin_unlock(&net->nsid_lock);
+		return id;
+	}
+
+	/* When peer is obtained from RCU lists, we may race with
 	 * its cleanup. Check whether it's alive, and this guarantees
 	 * we never hash a peer back to net->netns_ids, after it has
 	 * just been idr_remove()'d from there in cleanup_net().
 	 */
-	if (maybe_get_net(peer))
-		alive = alloc = true;
-	id = __peernet2id_alloc(net, peer, &alloc);
-	spin_unlock_bh(&net->nsid_lock);
-	if (alloc && id >= 0)
-		rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
-	if (alive)
-		put_net(peer);
+	if (!maybe_get_net(peer)) {
+		spin_unlock(&net->nsid_lock);
+		return NETNSA_NSID_NOT_ASSIGNED;
+	}
+
+	id = alloc_netid(net, peer, -1);
+	spin_unlock(&net->nsid_lock);
+
+	put_net(peer);
+	if (id < 0)
+		return NETNSA_NSID_NOT_ASSIGNED;
+
+	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
+
 	return id;
 }
 EXPORT_SYMBOL_GPL(peernet2id_alloc);
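A note on the maybe_get_net() check above: it is a standard RCU idiom. An object found via an RCU-protected structure may already be in teardown, so before creating a new long-lived reference to it (here, hashing an ID for it), the writer must take a refcount that can fail. Below is a sketch of the idiom with a hypothetical struct example_obj; maybe_get_net() applies the same refcount_inc_not_zero() test to struct net.

        #include <linux/refcount.h>

        struct example_obj {
                refcount_t count;
                /* ... payload ... */
        };

        /* Fails once the last reference is gone, so an object that cleanup
         * is already tearing down can never be revived and re-hashed.
         */
        static struct example_obj *example_tryget(struct example_obj *obj)
        {
                if (!refcount_inc_not_zero(&obj->count))
                        return NULL;
                return obj;
        }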
@@ -281,9 +272,10 @@ int peernet2id(struct net *net, struct net *peer)
 {
 	int id;
 
-	spin_lock_bh(&net->nsid_lock);
+	rcu_read_lock();
 	id = __peernet2id(net, peer);
-	spin_unlock_bh(&net->nsid_lock);
+	rcu_read_unlock();
+
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -528,20 +520,20 @@ static void unhash_nsid(struct net *net, struct net *last)
 	for_each_net(tmp) {
 		int id;
 
-		spin_lock_bh(&tmp->nsid_lock);
+		spin_lock(&tmp->nsid_lock);
 		id = __peernet2id(tmp, net);
 		if (id >= 0)
 			idr_remove(&tmp->netns_ids, id);
-		spin_unlock_bh(&tmp->nsid_lock);
+		spin_unlock(&tmp->nsid_lock);
 		if (id >= 0)
 			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
 					  GFP_KERNEL);
 		if (tmp == last)
 			break;
 	}
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	idr_destroy(&net->netns_ids);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 }
 
 static LLIST_HEAD(cleanup_list);
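The _bh removals in this and the following hunks all rest on one observation from the cover letter; the illustration below is mine, not the commit's. spin_lock_bh() exists to prevent a self-deadlock: if a softirq can take a lock, a process-context holder of that same lock must disable BHs, or the softirq may interrupt it on the same CPU and spin forever. Once every softirq-context lookup goes through RCU, nsid_lock is only ever taken in process context and the plain variants become safe (reusing the hypothetical example_ids/example_lock from the first sketch):

        /* Without BH disabling:
         *      CPU0, process context:  spin_lock(&example_lock);
         *      CPU0, softirq fires:    spin_lock(&example_lock);  -> deadlock
         * With readers on RCU, the softirq path never touches the lock,
         * so the writer may use plain spin_lock().
         */
        static void example_remove(int id)
        {
                spin_lock(&example_lock);
                idr_remove(&example_ids, id);
                spin_unlock(&example_lock);
        }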
@@ -754,9 +746,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return PTR_ERR(peer);
 	}
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		err = -EEXIST;
 		NL_SET_BAD_ATTR(extack, nla);
 		NL_SET_ERR_MSG(extack,
@@ -765,7 +757,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
 				  nlh, GFP_KERNEL);
@@ -950,6 +942,7 @@ struct rtnl_net_dump_cb {
 	int s_idx;
 };
 
+/* Runs in RCU-critical section. */
 static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 {
 	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
@@ -1034,19 +1027,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 			goto end;
 	}
 
-	spin_lock_bh(&net_cb.tgt_net->nsid_lock);
-	if (net_cb.fillargs.add_ref &&
-	    !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
-	    !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
-		spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
-		err = -EAGAIN;
-		goto end;
-	}
+	rcu_read_lock();
 	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	if (net_cb.fillargs.add_ref &&
-	    !net_eq(net_cb.ref_net, net_cb.tgt_net))
-		spin_unlock_bh(&net_cb.ref_net->nsid_lock);
-	spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
+	rcu_read_unlock();
 
 	cb->args[0] = net_cb.idx;
 end:
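The dump path benefits most visibly: the old code had to trylock a second namespace's nsid_lock and bail out with -EAGAIN on contention, whereas the new code is one flat RCU section. The constraint flagged by the new "Runs in RCU-critical section." comment is that the per-entry callback must not sleep (no GFP_KERNEL allocations, no sleeping locks). A sketch of the resulting shape, again with hypothetical names and the example_ids IDR from the first sketch:

        /* Must not sleep: runs inside the caller's RCU read-side section. */
        static int example_dump_one(int id, void *entry, void *data)
        {
                /* copy whatever is needed out of entry into data */
                return 0;       /* 0 keeps the walk going */
        }

        static void example_dump(void *data)
        {
                rcu_read_lock();
                idr_for_each(&example_ids, example_dump_one, data);
                rcu_read_unlock();
        }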