Phonet: convert bound sockets hash list to RCU

This gets rid of the last spinlock in the Phonet stack proper.

Signed-off-by: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Rémi Denis-Courmont 2011-04-12 23:27:36 +00:00 committed by David S. Miller
Parent fce55922f5
Commit 44f4d5a27e
1 changed file with 21 additions and 24 deletions
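The conversion follows the usual RCU recipe for a hash list that is read often and updated rarely: lookups run under rcu_read_lock() and take a socket reference before leaving the read-side critical section, while the writers (hash and unhash) serialize against each other on a mutex and wait for a grace period before the socket can be released. Below is a minimal, self-contained sketch of that recipe, shown before the diff itself. It uses the three-argument sk_for_each_rcu() iterator of this kernel era, and example_hlist, example_lock and the *_example functions are hypothetical names, not Phonet code:

/* Sketch of the read-mostly hash list pattern this patch applies.
 * Hypothetical example code, not part of net/phonet. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/phonet/phonet.h>

static HLIST_HEAD(example_hlist);
static DEFINE_MUTEX(example_lock);	/* serializes writers only */

/* Reader: lockless lookup; the reference must be taken before
 * leaving the RCU read-side critical section. */
static struct sock *pn_lookup_example(u16 obj)
{
	struct hlist_node *node;
	struct sock *sknode, *rval = NULL;

	rcu_read_lock();
	sk_for_each_rcu(sknode, node, &example_hlist) {
		if (pn_sk(sknode)->sobject != obj)
			continue;
		rval = sknode;
		sock_hold(sknode);	/* pin it before unlocking */
		break;
	}
	rcu_read_unlock();
	return rval;
}

/* Writers: plain mutual exclusion; readers are never blocked. */
static void pn_hash_example(struct sock *sk)
{
	mutex_lock(&example_lock);
	sk_add_node_rcu(sk, &example_hlist);
	mutex_unlock(&example_lock);
}

static void pn_unhash_example(struct sock *sk)
{
	mutex_lock(&example_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&example_lock);
	synchronize_rcu();	/* no reader can still see sk afterwards */
}

The hunks below apply this split to pnsocks.hlist: the spinlock becomes the writers' mutex, the readers switch to sk_for_each_rcu(), and the grace-period wait lands in pn_sock_unhash().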

@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)

 static struct {
 	struct hlist_head hlist[PN_HASHSIZE];
-	spinlock_t lock;
+	struct mutex lock;
 } pnsocks;

 void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)

 	for (i = 0; i < PN_HASHSIZE; i++)
 		INIT_HLIST_HEAD(pnsocks.hlist + i);
-	spin_lock_init(&pnsocks.lock);
+	mutex_init(&pnsocks.lock);
 }

 static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 	u8 res = spn->spn_resource;
 	struct hlist_head *hlist = pn_hash_list(obj);

-	spin_lock_bh(&pnsocks.lock);
-
-	sk_for_each(sknode, node, hlist) {
+	rcu_read_lock();
+	sk_for_each_rcu(sknode, node, hlist) {
 		struct pn_sock *pn = pn_sk(sknode);

 		BUG_ON(!pn->sobject); /* unbound socket */
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 		sock_hold(sknode);
 		break;
 	}
-
-	spin_unlock_bh(&pnsocks.lock);
+	rcu_read_unlock();

 	return rval;
 }
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 	struct hlist_head *hlist = pnsocks.hlist;
 	unsigned h;

-	spin_lock(&pnsocks.lock);
+	rcu_read_lock();
 	for (h = 0; h < PN_HASHSIZE; h++) {
 		struct hlist_node *node;
 		struct sock *sknode;
@@ -140,25 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 		}
 		hlist++;
 	}
-	spin_unlock(&pnsocks.lock);
+	rcu_read_unlock();
 }

 void pn_sock_hash(struct sock *sk)
 {
 	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

-	spin_lock_bh(&pnsocks.lock);
-	sk_add_node(sk, hlist);
-	spin_unlock_bh(&pnsocks.lock);
+	mutex_lock(&pnsocks.lock);
+	sk_add_node_rcu(sk, hlist);
+	mutex_unlock(&pnsocks.lock);
 }
 EXPORT_SYMBOL(pn_sock_hash);

 void pn_sock_unhash(struct sock *sk)
 {
-	spin_lock_bh(&pnsocks.lock);
-	sk_del_node_init(sk);
-	spin_unlock_bh(&pnsocks.lock);
+	mutex_lock(&pnsocks.lock);
+	sk_del_node_init_rcu(sk);
+	mutex_unlock(&pnsocks.lock);
 	pn_sock_unbind_all_res(sk);
+	synchronize_rcu();
 }
 EXPORT_SYMBOL(pn_sock_unhash);
@@ -548,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
 	unsigned h;

 	for (h = 0; h < PN_HASHSIZE; h++) {
-		sk_for_each(sknode, node, hlist) {
+		sk_for_each_rcu(sknode, node, hlist) {
 			if (!net_eq(net, sock_net(sknode)))
 				continue;
 			if (!pos)
@@ -572,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
 }

 static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(pnsocks.lock)
+	__acquires(rcu)
 {
-	spin_lock_bh(&pnsocks.lock);
+	rcu_read_lock();
 	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }

@@ -591,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }

 static void pn_sock_seq_stop(struct seq_file *seq, void *v)
-	__releases(pnsocks.lock)
+	__releases(rcu)
 {
-	spin_unlock_bh(&pnsocks.lock);
+	rcu_read_unlock();
 }

 static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -721,13 +720,11 @@ void pn_sock_unbind_all_res(struct sock *sk)
 	}
 	mutex_unlock(&resource_mutex);

-	if (match == 0)
-		return;
-	synchronize_rcu();
 	while (match > 0) {
-		sock_put(sk);
+		__sock_put(sk);
 		match--;
 	}
+	/* Caller is responsible for RCU sync before final sock_put() */
 }

 #ifdef CONFIG_PROC_FS
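The last hunk carries the subtle part of the reference counting: pn_sock_unbind_all_res() now drops the resource-table references with __sock_put(), which assumes they are not the last ones, and the synchronize_rcu() call moves into pn_sock_unhash(), so a single grace period covers removal from both the bound-socket hash and the resource table, per the new comment that the caller is responsible for the RCU sync before the final sock_put(). A hedged sketch of the teardown ordering this establishes; pn_teardown_example is a hypothetical illustration, not a function in the Phonet tree:

/* Hypothetical caller-side teardown; shows only the ordering that the
 * patch establishes, not actual Phonet release-path code. */
#include <net/phonet/phonet.h>
#include <net/sock.h>

static void pn_teardown_example(struct sock *sk)
{
	/* Unlinks sk from the hash under the mutex, drops the resource
	 * references with __sock_put(), then waits one grace period. */
	pn_sock_unhash(sk);

	/* Only now may the last reference go away: no RCU reader can
	 * still find sk through the hash or the resource table. */
	sock_put(sk);
}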