net: sk_sleep() helper
Define a new function to return the waitqueue of a "struct sock":

	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}

Replace all read accesses of sk_sleep with a call to this function. This is needed for a future RCU conversion, after which sk_sleep won't be a directly accessible field.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: ab9304717f
Commit: aa39514516
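The conversion below is mechanical: every reader that used to dereference the field now calls the accessor, so the sleep/wake pattern around a socket is unchanged. A minimal sketch of what a converted call site looks like (illustrative only; the helper names example_wait_for_data/example_data_ready are made up, while the wait-queue APIs are the ones used throughout the diff):

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <net/sock.h>

	static void example_wait_for_data(struct sock *sk)
	{
		DEFINE_WAIT(wait);

		/* was: prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); */
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (skb_queue_empty(&sk->sk_receive_queue))
			schedule();
		/* was: finish_wait(sk->sk_sleep, &wait); */
		finish_wait(sk_sleep(sk), &wait);
	}

	static void example_data_ready(struct sock *sk)
	{
		/* was: if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) ... */
		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
	}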
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
 	*(struct atm_vcc **) &new_msg->vcc = vcc;
 	old_test = test_bit(flag,&vcc->flags);
 	out_vcc->push(out_vcc,skb);
-	add_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
 	while (test_bit(flag,&vcc->flags) == old_test) {
 		mb();
 		out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
 	return error;
 }
 
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
 			msg->type);
 		return -EINVAL;
 	}
-	wake_up(sk_atm(vcc)->sk_sleep);
+	wake_up(sk_sleep(sk_atm(vcc)));
 	return 0;
 }
 
@@ -246,8 +246,8 @@ static void macvtap_sock_write_space(struct sock *sk)
 	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 		return;
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND);
 }
 
 static int macvtap_open(struct inode *inode, struct file *file)

@@ -868,8 +868,8 @@ static void tun_sock_write_space(struct sock *sk)
 	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 		return;
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
 	tun = tun_sk(sk)->tun;

@@ -599,9 +599,9 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
-	if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+	if (sk_sleep(sock->sk) && waitqueue_active(sk_sleep(sock->sk))) {
 		sock->sk->sk_err = EIO;
-		wake_up_interruptible(sock->sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(sock->sk));
 	}
 
 	iscsi_conn_stop(cls_conn, flag);

@@ -1160,6 +1160,10 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 	sk->sk_socket = sock;
 }
 
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+	return sk->sk_sleep;
+}
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
  * Note that parent inode held reference count on this struct sock,
@@ -1346,8 +1350,8 @@ static inline int sk_has_allocations(const struct sock *sk)
  *   tp->rcv_nxt check   sock_def_readable
  *   ...                 {
  *   schedule               ...
- *                          if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- *                              wake_up_interruptible(sk->sk_sleep)
+ *                          if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+ *                              wake_up_interruptible(sk_sleep(sk))
 *   ...
 * }
 *
@@ -1368,7 +1372,7 @@ static inline int sk_has_sleeper(struct sock *sk)
 	 * This memory barrier is paired in the sock_poll_wait.
 	 */
 	smp_mb__after_lock();
-	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+	return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
 }
 
 /**

@@ -939,7 +939,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
 		tp->ucopy.memory = 0;
 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-		wake_up_interruptible_sync_poll(sk->sk_sleep,
+		wake_up_interruptible_sync_poll(sk_sleep(sk),
 					   POLLIN | POLLRDNORM | POLLRDBAND);
 		if (!inet_csk_ack_scheduled(sk))
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,

@@ -92,7 +92,7 @@ static void vcc_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up(sk->sk_sleep);
+		wake_up(sk_sleep(sk));
 	read_unlock(&sk->sk_callback_lock);
 }
 
@@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
 
 	if (vcc_writable(sk)) {
 		if (sk_has_sleeper(sk))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible(sk_sleep(sk));
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
@@ -549,7 +549,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 	}
 
 	eff = (size+3) & ~3; /* align to word boundary */
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	error = 0;
 	while (!(skb = alloc_tx(vcc, eff))) {
 		if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +568,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 			send_sig(SIGPIPE, current, 0);
 			break;
 		}
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (error)
 		goto out;
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +595,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct atm_vcc *vcc;
 	unsigned int mask;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	vcc = ATM_SD(sock);

@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
 		}
 		sk->sk_ack_backlog++;
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
+		pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
 		sk->sk_state_change(sk);
 as_indicate_complete:
 		release_sock(sk);

@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
 
 	pr_debug("%p\n", vcc);
 	if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 		sigd_enq(vcc, as_close, NULL, NULL, NULL);
 		while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
 			schedule();
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_UNINTERRUPTIBLE);
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 	}
 	/* beware - socket is still in use by atmsigd until the last
 	   as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
 	}
 	vcc->local = *addr;
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
 	if (!sigd) {
 		error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 	}
 	vcc->remote = *addr;
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
 	if (flags & O_NONBLOCK) {
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		sock->state = SS_CONNECTING;
 		error = -EINPROGRESS;
 		goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
 		if (!signal_pending(current)) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			continue;
 		}
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 		 */
 		sigd_enq(vcc, as_close, NULL, NULL, NULL);
 		while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			schedule();
 		}
 		if (!sk->sk_err)
 			while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
 			       sigd) {
-				prepare_to_wait(sk->sk_sleep, &wait,
+				prepare_to_wait(sk_sleep(sk), &wait,
 						TASK_INTERRUPTIBLE);
 				schedule();
 			}
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 			error = -EINTR;
 			break;
 		}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (error)
 		goto out;
 	if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
 		goto out;
 	}
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd) {
 		error = -EUNATCH;
 		goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 	while (1) {
 		DEFINE_WAIT(wait);
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
 		       sigd) {
 			if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 				error = -ERESTARTSYS;
 				break;
 			}
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		if (error)
 			goto out;
 		if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 		}
 		/* wait should be short, so we ignore the non-blocking flag */
 		set_bit(ATM_VF_WAITING, &new_vcc->flags);
-		prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+		prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
 				TASK_UNINTERRUPTIBLE);
 		sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
 		while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
 			release_sock(sk);
 			schedule();
 			lock_sock(sk);
-			prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
 					TASK_UNINTERRUPTIBLE);
 		}
-		finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
 		if (!sigd) {
 			error = -EUNATCH;
 			goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
 	DEFINE_WAIT(wait);
 
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
 	       !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd)
 		return -EUNATCH;
 	return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
 
 	lock_sock(sk);
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq(vcc, as_addparty, NULL, NULL,
 		 (struct sockaddr_atmsvc *) sockaddr);
 	if (flags & O_NONBLOCK) {
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		error = -EINPROGRESS;
 		goto out;
 	}
 	pr_debug("added wait queue\n");
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	error = xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
 
 	lock_sock(sk);
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd) {
 		error = -EUNATCH;
 		goto out;

@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
 			err = -ERESTARTSYS;
 			break;
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 
 		if (err)
 			goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 	 *	hooked into the SABM we saved
 	 */
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb)
 			break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 		err = -ERESTARTSYS;
 		break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 
 	if (err)
 		goto out;

@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	if (sk->sk_state == BT_LISTEN)
 		return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 
 	BT_DBG("sk %p", sk);
 
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (sk->sk_state != state) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 		break;
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
 }
 EXPORT_SYMBOL(bt_sock_wait_state);

@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&wait, current);
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (!atomic_read(&s->killed)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */
 	down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
 	/* Wakeup user-space polling for socket errors */
 	s->sock->sk->sk_err = EUNATCH;
 
-	wake_up_interruptible(s->sock->sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(s->sock->sk));
 
 	/* Release the socket */
 	fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 
 		/* Kill session thread */
 		atomic_inc(&s->killed);
-		wake_up_interruptible(s->sock->sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(s->sock->sk));
 	} else
 		err = -ENOENT;
 
@@ -109,7 +109,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 	}
 
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 #endif
 }
 
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
 	/*
 	 * We cannot send L2CAP packets from here as we are potentially in a bh.
 	 * So we have to queue them and wake up session thread which is sleeping
-	 * on the sk->sk_sleep.
+	 * on the sk_sleep(sk).
 	 */
 	dev->trans_start = jiffies;
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 
 	if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
 		BT_DBG("tx queue is full");

@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
 {
 	struct sock *sk = session->sock->sk;
 
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 }
 
 /* CMTP init defines */

@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&wait, current);
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (!atomic_read(&session->terminate)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	down_write(&cmtp_session_sem);
 
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
 
 	init_waitqueue_entry(&ctrl_wait, current);
 	init_waitqueue_entry(&intr_wait, current);
-	add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
-	add_wait_queue(intr_sk->sk_sleep, &intr_wait);
+	add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	while (!atomic_read(&session->terminate)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(intr_sk->sk_sleep, &intr_wait);
-	remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
+	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
+	remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
 
 	down_write(&hidp_session_sem);
 
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
 
 	fput(session->intr_sock->file);
 
-	wait_event_timeout(*(ctrl_sk->sk_sleep),
+	wait_event_timeout(*(sk_sleep(ctrl_sk)),
 				(ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
 
 	fput(session->ctrl_sock->file);

@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
 	struct sock *ctrl_sk = session->ctrl_sock->sk;
 	struct sock *intr_sk = session->intr_sock->sk;
 
-	wake_up_interruptible(ctrl_sk->sk_sleep);
-	wake_up_interruptible(intr_sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(ctrl_sk));
+	wake_up_interruptible(sk_sleep(intr_sk));
 }
 
 /* HIDP init defines */

@@ -1147,7 +1147,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -1170,7 +1170,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;

@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	}
 
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return timeo;
 }
 
@@ -567,7 +567,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(ch = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -590,7 +590,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;

@@ -689,7 +689,7 @@ static unsigned int caif_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 	u32 mask = 0;
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 	lock_sock(&(cf_sk->sk));
 	if (!STATE_IS_OPEN(cf_sk)) {
 		if (!STATE_IS_PENDING(cf_sk))

@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 	int error;
 	DEFINE_WAIT_FUNC(wait, receiver_wake_function);
 
-	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Socket errors? */
 	error = sock_error(sk);
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 	error = 0;
 	*timeo_p = schedule_timeout(*timeo_p);
 out:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return error;
 interrupted:
 	error = sock_intr_errno(*timeo_p);
@@ -726,7 +726,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* exceptional events? */

@@ -1395,7 +1395,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 		if (signal_pending(current))
 			break;
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
 			break;
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
@@ -1404,7 +1404,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 			break;
 		timeo = schedule_timeout(timeo);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return timeo;
 }
 
@@ -1570,11 +1570,11 @@ int sk_wait_data(struct sock *sk, long *timeo)
 	int rc;
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return rc;
 }
 EXPORT_SYMBOL(sk_wait_data);
@@ -1798,7 +1798,7 @@ static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up_interruptible_all(sk->sk_sleep);
+		wake_up_interruptible_all(sk_sleep(sk));
 	read_unlock(&sk->sk_callback_lock);
 }
 
@@ -1806,7 +1806,7 @@ static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
+		wake_up_interruptible_poll(sk_sleep(sk), POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1815,7 +1815,7 @@ static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+		wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	read_unlock(&sk->sk_callback_lock);
@@ -1830,7 +1830,7 @@ static void sock_def_write_space(struct sock *sk)
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		if (sk_has_sleeper(sk))
-			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+			wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
 		/* Should agree with poll, otherwise some programs break */

@@ -32,8 +32,8 @@ void sk_stream_write_space(struct sock *sk)
 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
 		clear_bit(SOCK_NOSPACE, &sock->flags);
 
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
+		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible_poll(sk_sleep(sk), POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
 			sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
@@ -66,13 +66,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
 		if (signal_pending(tsk))
 			return sock_intr_errno(*timeo_p);
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		sk->sk_write_pending++;
 		done = sk_wait_event(sk, timeo_p,
 				     !sk->sk_err &&
 				     !((1 << sk->sk_state) &
 				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		sk->sk_write_pending--;
 	} while (!done);
 	return 0;
@@ -96,13 +96,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
 		DEFINE_WAIT(wait);
 
 		do {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
 				break;
 		} while (!signal_pending(current) && timeout);
 
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 	}
 }
 
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 	while (1) {
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 			goto do_error;
@@ -157,7 +157,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 			*timeo_p = current_timeo;
 	}
 out:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return err;
 
 do_error:

@@ -198,7 +198,7 @@ void dccp_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 
 	if (sk_has_sleeper(sk))
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(sk));
 	/* Should agree with poll, otherwise some programs break */
 	if (sock_writeable(sk))
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@@ -225,7 +225,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
 	dccp_pr_debug("delayed send by %d msec\n", delay);
 	jiffdelay = msecs_to_jiffies(delay);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	sk->sk_write_pending++;
 	release_sock(sk);
@@ -241,7 +241,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
 		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 	} while ((delay = rc) > 0);
 out:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return rc;
 
 do_error:
 
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	if (sk->sk_state == DCCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 	scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
 	dn_send_conn_conf(sk, allocation);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	for(;;) {
 		release_sock(sk);
 		if (scp->state == DN_CC)
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 		err = -EAGAIN;
 		if (!*timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (err == 0) {
 		sk->sk_socket->state = SS_CONNECTED;
 	} else if (scp->state != DN_CC) {
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
 	if (!*timeo)
 		return -EALREADY;
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	for(;;) {
 		release_sock(sk);
 		if (scp->state == DN_CI || scp->state == DN_CC)
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo)
 		err = -ETIMEDOUT;
 		if (!*timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 out:
 	if (err == 0) {
 		sk->sk_socket->state = SS_CONNECTED;
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
 	struct sk_buff *skb = NULL;
 	int err = 0;
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	for(;;) {
 		release_sock(sk);
 		skb = skb_dequeue(&sk->sk_receive_queue);
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
 		err = -EAGAIN;
 		if (!*timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 
 	return skb == NULL ? ERR_PTR(err) : skb;
 }
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 			goto out;
 		}
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 	}
 
 	skb_queue_walk_safe(queue, skb, n) {
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
 			goto out;
 		}
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 		sk_wait_event(sk, &timeo,
 			      !dn_queue_too_long(scp, queue, flags));
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		continue;
 	}
 
@@ -548,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 {
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Basic assumption: if someone sets sk->sk_err, he _must_
 	 * change state of the socket from TCP_SYN_*.
@@ -561,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 		lock_sock(sk);
 		if (signal_pending(current) || !timeo)
 			break;
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return timeo;
 }
 
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 	 * having to remove and re-insert us on the wait queue.
 	 */
 	for (;;) {
-		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 					  TASK_INTERRUPTIBLE);
 		release_sock(sk);
 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 		if (!timeo)
 			break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return err;
 }
 
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	if (sk->sk_state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
 		self->tx_flow = flow;
 		IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
 			   __func__);
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(sk));
 		break;
 	default:
 		IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 		if (flags & O_NONBLOCK)
 			goto out;
 
-		err = wait_event_interruptible(*(sk->sk_sleep),
+		err = wait_event_interruptible(*(sk_sleep(sk)),
 					       skb_peek(&sk->sk_receive_queue));
 		if (err)
 			goto out;
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
 		goto out;
 
 	err = -ERESTARTSYS;
-	if (wait_event_interruptible(*(sk->sk_sleep),
+	if (wait_event_interruptible(*(sk_sleep(sk)),
 				     (sk->sk_state != TCP_SYN_SENT)))
 		goto out;
 
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 		/* Check if IrTTP is wants us to slow down */
 
-		if (wait_event_interruptible(*(sk->sk_sleep),
+		if (wait_event_interruptible(*(sk_sleep(sk)),
 		    (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
 			err = -ERESTARTSYS;
 			goto out;
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
 		if (copied >= target)
 			break;
 
-		prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 		/*
 		 *	POSIX 1003.1g mandates this order.
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
 		/* Wait process until data arrives */
 		schedule();
 
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 
 		if (err)
 			goto out;
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
 	lock_kernel();
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* Exceptional events? */

@@ -59,7 +59,7 @@ do { \
 	DEFINE_WAIT(__wait); \
 	long __timeo = timeo; \
 	ret = 0; \
-	prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
 	while (!(condition)) { \
 		if (!__timeo) { \
 			ret = -EAGAIN; \
@@ -76,7 +76,7 @@ do { \
 		if (ret) \
 			break; \
 	} \
-	finish_wait(sk->sk_sleep, &__wait); \
+	finish_wait(sk_sleep(sk), &__wait); \
 } while (0)
 
 #define iucv_sock_wait(sk, condition, timeo) \
@@ -307,7 +307,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up_interruptible_all(sk->sk_sleep);
+		wake_up_interruptible_all(sk_sleep(sk));
 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -795,7 +795,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
 	/* Wait for an incoming connection */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -819,7 +819,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 	}
 
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;
@@ -1269,7 +1269,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 
 	if (sk->sk_state == IUCV_LISTEN)
 		return iucv_accept_poll(sk);

@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
 	int rc = 0;
 
 	while (1) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
 			break;
 		rc = -ERESTARTSYS;
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
 			break;
 		rc = 0;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return rc;
 }
 
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
 	DEFINE_WAIT(wait);
 
 	while (1) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
 			break;
 		if (signal_pending(current) || !timeout)
 			break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return timeout;
 }
 
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
 	int rc;
 
 	while (1) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		rc = 0;
 		if (sk_wait_event(sk, &timeout,
 				  (sk->sk_shutdown & RCV_SHUTDOWN) ||
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
 		if (!timeout)
 			break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return rc;
 }
 
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data)
 		ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
 
 	while (!kthread_should_stop()) {
-		wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
+		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
 			 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
 			 || kthread_should_stop());
 
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
 			err = -ERESTARTSYS;
 			break;
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		if (err)
 			goto out_release;
 	}
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
 	 *	hooked into the SABM we saved
 	 */
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb)
 			break;
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
 		err = -ERESTARTSYS;
 		break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (err)
 		goto out_release;
 
@@ -158,7 +158,7 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
 	unsigned int mask = 0;
 	unsigned long flags;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	if (rs->rs_seen_congestion)
 		poll_wait(file, &rds_poll_waitq, wait);
 
@@ -492,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
 void rds_wake_sk_sleep(struct rds_sock *rs);
 static inline void __rds_wake_sk_sleep(struct sock *sk)
 {
-	wait_queue_head_t *waitq = sk->sk_sleep;
+	wait_queue_head_t *waitq = sk_sleep(sk);
 
 	if (!sock_flag(sk, SOCK_DEAD) && waitq)
 		wake_up(waitq);
 
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 			break;
 		}
 
-		timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 					(!list_empty(&rs->rs_notify_queue) ||
 					 rs->rs_cong_notify ||
 					 rds_next_incoming(rs, &inc)), timeo);
 
@@ -915,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 			goto out;
 		}
 
-		timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 					rds_send_queue_rm(rs, conn, rm,
 							  rs->rs_bound_port,
 							  dport,

@@ -845,7 +845,7 @@ rose_try_next_neigh:
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
@@ -858,7 +858,7 @@ rose_try_next_neigh:
 			err = -ERESTARTSYS;
 			break;
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 
 		if (err)
 			goto out_release;
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 	 *	hooked into the SABM we saved
 	 */
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb)
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
 		err = -ERESTARTSYS;
 		break;
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (err)
 		goto out_release;
 
@@ -65,7 +65,7 @@ static void rxrpc_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 	if (rxrpc_writable(sk)) {
 		if (sk_has_sleeper(sk))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible(sk_sleep(sk));
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 	read_unlock(&sk->sk_callback_lock);
@@ -589,7 +589,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* the socket is readable if there are any messages waiting on the Rx

@@ -5702,7 +5702,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sctp_sock *sp = sctp_sk(sk);
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	/* A TCP-style listening socket becomes readable when the accept queue
 	 * is not empty.
@@ -5943,7 +5943,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
 	int error;
 	DEFINE_WAIT(wait);
 
-	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 	/* Socket errors? */
 	error = sock_error(sk);
@@ -5980,14 +5980,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
 	sctp_lock_sock(sk);
 
 ready:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	return 0;
 
 interrupted:
 	error = sock_intr_errno(*timeo_p);
 
 out:
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	*err = error;
 	return error;
 }
@@ -6061,8 +6061,8 @@ static void __sctp_write_space(struct sctp_association *asoc)
 			wake_up_interruptible(&asoc->wait);
 
 		if (sctp_writeable(sk)) {
-			if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-				wake_up_interruptible(sk->sk_sleep);
+			if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+				wake_up_interruptible(sk_sleep(sk));
 
 			/* Note that we try to include the Async I/O support
 			 * here by modeling from the current TCP/UDP code.
@@ -6296,7 +6296,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
 
 
 	for (;;) {
-		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
+		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
 					  TASK_INTERRUPTIBLE);
 
 		if (list_empty(&ep->asocs)) {
@@ -6322,7 +6322,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
 			break;
 	}
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 
 	return err;
 }
@@ -6332,7 +6332,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
 	DEFINE_WAIT(wait);
 
 	do {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (list_empty(&sctp_sk(sk)->ep->asocs))
 			break;
 		sctp_release_sock(sk);
@@ -6340,7 +6340,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
 		sctp_lock_sock(sk);
 	} while (!signal_pending(current) && timeout);
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 }
 
 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)

@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible(sk_sleep(sk));
 }
 
 /*
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk)
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
 		dprintk("RPC svc_write_space: someone sleeping on %p\n",
 		       svsk);
-		wake_up_interruptible(sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(sk));
 	}
 }
 
@@ -757,8 +757,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 			printk("svc: socket %p: no user data\n", sk);
 	}
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_all(sk->sk_sleep);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible_all(sk_sleep(sk));
 }
 
 /*
@@ -777,8 +777,8 @@ static void svc_tcp_state_change(struct sock *sk)
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_all(sk->sk_sleep);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible_all(sk_sleep(sk));
 }
 
 static void svc_tcp_data_ready(struct sock *sk, int count)
@@ -791,8 +791,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible(sk_sleep(sk));
 }
 
 /*
@@ -1494,8 +1494,8 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 	sk->sk_data_ready = svsk->sk_odata;
 	sk->sk_write_space = svsk->sk_owspace;
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible(sk_sleep(sk));
 }
 
 /*

@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	u32 mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
 	    (sock->state == SS_UNCONNECTED) ||
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
 			break;
 		}
 		release_sock(sk);
-		res = wait_event_interruptible(*sk->sk_sleep,
+		res = wait_event_interruptible(*sk_sleep(sk),
 					       !tport->congested);
 		lock_sock(sk);
 		if (res)
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
 			break;
 		}
 		release_sock(sk);
-		res = wait_event_interruptible(*sk->sk_sleep,
+		res = wait_event_interruptible(*sk_sleep(sk),
 			(!tport->congested || !tport->connected));
 		lock_sock(sk);
 		if (res)
@@ -931,7 +931,7 @@ restart:
 			goto exit;
 		}
 		release_sock(sk);
-		res = wait_event_interruptible(*sk->sk_sleep,
+		res = wait_event_interruptible(*sk_sleep(sk),
 			(!skb_queue_empty(&sk->sk_receive_queue) ||
 			(sock->state == SS_DISCONNECTING)));
 		lock_sock(sk);
@@ -1064,7 +1064,7 @@ restart:
 			goto exit;
 		}
 		release_sock(sk);
-		res = wait_event_interruptible(*sk->sk_sleep,
+		res = wait_event_interruptible(*sk_sleep(sk),
 			(!skb_queue_empty(&sk->sk_receive_queue) ||
 			(sock->state == SS_DISCONNECTING)));
 		lock_sock(sk);
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 			tipc_disconnect_port(tipc_sk_port(sk));
 	}
 
-	if (waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+	if (waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible(sk_sleep(sk));
 	return TIPC_OK;
 }
 
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport)
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
 
-	if (waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible(sk->sk_sleep);
+	if (waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible(sk_sleep(sk));
 }
 
 /**
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
 
 	release_sock(sk);
-	res = wait_event_interruptible_timeout(*sk->sk_sleep,
+	res = wait_event_interruptible_timeout(*sk_sleep(sk),
 			(!skb_queue_empty(&sk->sk_receive_queue) ||
 			(sock->state != SS_CONNECTING)),
 			sk->sk_rcvtimeo);
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
 		goto exit;
 	}
 	release_sock(sk);
-	res = wait_event_interruptible(*sk->sk_sleep,
+	res = wait_event_interruptible(*sk_sleep(sk),
 			(!skb_queue_empty(&sk->sk_receive_queue)));
 	lock_sock(sk);
 	if (res)
@@ -1632,8 +1632,8 @@ restart:
 		/* Discard any unreceived messages; wake up sleeping tasks */
 
 		discard_rx_queue(sk);
-		if (waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+		if (waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible(sk_sleep(sk));
 		res = 0;
 		break;

@@ -316,7 +316,7 @@ static void unix_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 	if (unix_writable(sk)) {
 		if (sk_has_sleeper(sk))
-			wake_up_interruptible_sync(sk->sk_sleep);
+			wake_up_interruptible_sync(sk_sleep(sk));
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 	read_unlock(&sk->sk_callback_lock);
@@ -1736,7 +1736,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
 	unix_state_lock(sk);
 
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
 		if (!skb_queue_empty(&sk->sk_receive_queue) ||
 		    sk->sk_err ||
@@ -1752,7 +1752,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	unix_state_unlock(sk);
 	return timeo;
 }
@@ -1991,7 +1991,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2028,7 +2028,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk, *other;
 	unsigned int mask, writable;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	/* exceptional events? */

@@ -718,7 +718,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
 	DECLARE_WAITQUEUE(wait, current);
 	int rc;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		rc = -ERESTARTSYS;
@@ -738,7 +738,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
 			break;
 	}
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
 }
 
@@ -838,7 +838,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 	DECLARE_WAITQUEUE(wait, current);
 	int rc = 0;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,7 +858,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 			break;
 	}
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
 }
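For context on where this is headed (not part of this commit): once every reader goes through sk_sleep(), the wait queue can be moved out of struct sock and behind an RCU-managed pointer, and only the accessor body has to change. A purely hypothetical sketch of that shape, where the socket_wq_example type and the sk_wq field name are illustrative assumptions, not taken from this patch:

	/* Hypothetical sketch only: the wait queue lives in its own
	 * RCU-freed object instead of directly inside struct sock. */
	struct socket_wq_example {
		wait_queue_head_t	wait;
		struct rcu_head		rcu;
	};

	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		/* Callers stay unchanged; only this accessor follows the
		 * RCU-protected pointer (assumed field name: sk_wq). */
		return &rcu_dereference_raw(sk->sk_wq)->wait;
	}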