net: Fix use after free by removing length arg from sk_data_ready callbacks.
Several spots in the kernel perform a sequence like:

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

But at the moment we place the SKB onto the socket receive queue it can be consumed and freed up, so this skb->len access is potentially a read of freed memory.

Furthermore, skb->len can be modified by the consumer, so it is possible that the value isn't accurate anyway.

And finally, no actual implementation of this callback uses the length argument. Since nobody cared about its value, lots of call sites pass in arbitrary values such as '0' and even '1'.

So just remove the length argument from the callback; that way there is no confusion whatsoever, and all of these use-after-free cases get fixed as a side effect.

Based upon a patch by Eric Dumazet and his suggestion to audit this issue tree-wide.

Signed-off-by: David S. Miller <davem@davemloft.net>
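For illustration only (this snippet is not part of the patch, and the calling function is hypothetical), the before/after shape of a typical call site is:

	/* Before (racy): once the skb is on the receive queue, a reader on
	 * another CPU may dequeue and free it, so the skb->len read here
	 * can be a use after free. */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	/* After: the callback takes only the socket, so the skb is never
	 * touched again once another context may own it. */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);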
This commit is contained in:
Parent: ad20d5f673
Commit: 676d23690f
@@ -125,7 +125,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
 	return 0;
 }
 
-static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn;
 	struct iscsi_tcp_conn *tcp_conn;
@@ -40,7 +40,7 @@ struct iscsi_sw_tcp_conn {
 
 	struct iscsi_sw_tcp_send out;
 	/* old values for socket callbacks */
-	void (*old_data_ready)(struct sock *, int);
+	void (*old_data_ready)(struct sock *);
 	void (*old_state_change)(struct sock *);
 	void (*old_write_space)(struct sock *);
 
@@ -655,7 +655,7 @@ extern void ksocknal_write_callback (ksock_conn_t *conn);
  * socket call back in Linux
  */
 static void
-ksocknal_data_ready (struct sock *sk, int n)
+ksocknal_data_ready (struct sock *sk)
 {
 	ksock_conn_t *conn;
 
@@ -666,7 +666,7 @@ ksocknal_data_ready (struct sock *sk, int n)
 	conn = sk->sk_user_data;
 	if (conn == NULL) { /* raced with ksocknal_terminate_conn */
 		LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
-		sk->sk_data_ready (sk, n);
+		sk->sk_data_ready (sk);
 	} else
 		ksocknal_read_callback(conn);
 
@@ -556,7 +556,7 @@ struct iscsi_conn {
 	struct completion rx_half_close_comp;
 	/* socket used by this connection */
 	struct socket *sock;
-	void (*orig_data_ready)(struct sock *, int);
+	void (*orig_data_ready)(struct sock *);
 	void (*orig_state_change)(struct sock *);
 #define LOGIN_FLAGS_READ_ACTIVE 1
 #define LOGIN_FLAGS_CLOSED 2
@@ -375,7 +375,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
 	return 0;
 }
 
-static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+static void iscsi_target_sk_data_ready(struct sock *sk)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
 	bool rc;
@@ -424,7 +424,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
 }
 
 /* Data available on socket or listen socket received a connect */
-static void lowcomms_data_ready(struct sock *sk, int count_unused)
+static void lowcomms_data_ready(struct sock *sk)
 {
 	struct connection *con = sock2con(sk);
 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
@@ -111,7 +111,7 @@ struct ncp_server {
 
 	spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
 
-	void (*data_ready)(struct sock* sk, int len);
+	void (*data_ready)(struct sock* sk);
 	void (*error_report)(struct sock* sk);
 	void (*write_space)(struct sock* sk); /* STREAM mode only */
 	struct {
@@ -153,7 +153,7 @@ extern void ncp_tcp_tx_proc(struct work_struct *work);
 extern void ncpdgram_rcv_proc(struct work_struct *work);
 extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
-extern void ncp_tcp_data_ready(struct sock* sk, int len);
+extern void ncp_tcp_data_ready(struct sock* sk);
 extern void ncp_tcp_write_space(struct sock* sk);
 extern void ncp_tcp_error_report(struct sock* sk);
 
@@ -96,11 +96,11 @@ static void ncp_req_put(struct ncp_request_reply *req)
 		kfree(req);
 }
 
-void ncp_tcp_data_ready(struct sock *sk, int len)
+void ncp_tcp_data_ready(struct sock *sk)
 {
 	struct ncp_server *server = sk->sk_user_data;
 
-	server->data_ready(sk, len);
+	server->data_ready(sk);
 	schedule_work(&server->rcv.tq);
 }
 
@@ -137,7 +137,7 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
 static void o2net_sc_connect_completed(struct work_struct *work);
 static void o2net_rx_until_empty(struct work_struct *work);
 static void o2net_shutdown_sc(struct work_struct *work);
-static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_listen_data_ready(struct sock *sk);
 static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
@@ -597,9 +597,9 @@ static void o2net_set_nn_state(struct o2net_node *nn,
 }
 
 /* see o2net_register_callbacks() */
-static void o2net_data_ready(struct sock *sk, int bytes)
+static void o2net_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	read_lock(&sk->sk_callback_lock);
 	if (sk->sk_user_data) {
@@ -613,7 +613,7 @@ static void o2net_data_ready(struct sock *sk, int bytes)
 	}
 	read_unlock(&sk->sk_callback_lock);
 
-	ready(sk, bytes);
+	ready(sk);
 }
 
 /* see o2net_register_callbacks() */
@@ -1953,9 +1953,9 @@ static void o2net_accept_many(struct work_struct *work)
 		cond_resched();
 }
 
-static void o2net_listen_data_ready(struct sock *sk, int bytes)
+static void o2net_listen_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	read_lock(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
@@ -1978,7 +1978,6 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes)
 	 */
 
 	if (sk->sk_state == TCP_LISTEN) {
-		mlog(ML_TCP, "bytes: %d\n", bytes);
 		queue_work(o2net_wq, &o2net_listen_work);
 	} else {
 		ready = NULL;
@@ -1987,7 +1986,7 @@ static void o2net_listen_data_ready(struct sock *sk, int bytes)
 out:
 	read_unlock(&sk->sk_callback_lock);
 	if (ready != NULL)
-		ready(sk, bytes);
+		ready(sk);
 }
 
 static int o2net_open_listening_sock(__be32 addr, __be16 port)
@@ -165,7 +165,7 @@ struct o2net_sock_container {
 
 	/* original handlers for the sockets */
 	void (*sc_state_change)(struct sock *sk);
-	void (*sc_data_ready)(struct sock *sk, int bytes);
+	void (*sc_data_ready)(struct sock *sk);
 
 	u32 sc_msg_key;
 	u16 sc_msg_type;
@@ -22,7 +22,7 @@ struct svc_sock {
 
 	/* We keep the old state_change and data_ready CB's here */
 	void (*sk_ostate)(struct sock *);
-	void (*sk_odata)(struct sock *, int bytes);
+	void (*sk_odata)(struct sock *);
 	void (*sk_owspace)(struct sock *);
 
 	/* private TCP part */
@@ -101,7 +101,7 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
-void sctp_data_ready(struct sock *sk, int len);
+void sctp_data_ready(struct sock *sk);
 unsigned int sctp_poll(struct file *file, struct socket *sock,
 		poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
@@ -418,7 +418,7 @@ struct sock {
 	u32 sk_classid;
 	struct cg_proto *sk_cgrp;
 	void (*sk_state_change)(struct sock *sk);
-	void (*sk_data_ready)(struct sock *sk, int bytes);
+	void (*sk_data_ready)(struct sock *sk);
 	void (*sk_write_space)(struct sock *sk);
 	void (*sk_error_report)(struct sock *sk);
 	int (*sk_backlog_rcv)(struct sock *sk,
@@ -68,7 +68,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 
 	sk = sk_atm(atmarpd);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	return 0;
 }
 
@@ -152,7 +152,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 		atm_force_charge(priv->lecd, skb2->truesize);
 		sk = sk_atm(priv->lecd);
 		skb_queue_tail(&sk->sk_receive_queue, skb2);
-		sk->sk_data_ready(sk, skb2->len);
+		sk->sk_data_ready(sk);
 	}
 }
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -447,7 +447,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
 		atm_force_charge(priv->lecd, skb2->truesize);
 		sk = sk_atm(priv->lecd);
 		skb_queue_tail(&sk->sk_receive_queue, skb2);
-		sk->sk_data_ready(sk, skb2->len);
+		sk->sk_data_ready(sk);
 	}
 }
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
@@ -530,13 +530,13 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
 	atm_force_charge(priv->lecd, skb->truesize);
 	sk = sk_atm(priv->lecd);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	if (data != NULL) {
 		pr_debug("about to send %d bytes of data\n", data->len);
 		atm_force_charge(priv->lecd, data->truesize);
 		skb_queue_tail(&sk->sk_receive_queue, data);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	}
 
 	return 0;
@@ -616,7 +616,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
 
 		pr_debug("%s: To daemon\n", dev->name);
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	} else { /* Data frame, queue to protocol handlers */
 		struct lec_arp_table *entry;
 		unsigned char *src, *dst;
@@ -706,7 +706,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
 		dprintk("(%s) control packet arrived\n", dev->name);
 		/* Pass control packets to daemon */
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 		return;
 	}
 
@@ -992,7 +992,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
 
 	sk = sk_atm(mpc->mpoad_vcc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	return 0;
 }
@@ -1273,7 +1273,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
 
 	sk = sk_atm(vcc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	dprintk("exiting\n");
 }
 
@@ -25,7 +25,7 @@ static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
 		struct sock *sk = sk_atm(vcc);
 
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	}
 }
 
@@ -51,7 +51,7 @@ static void sigd_put_skb(struct sk_buff *skb)
 #endif
 	atm_force_charge(sigd, skb->truesize);
 	skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
-	sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
+	sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
 }
 
 static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
@@ -422,7 +422,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
 
 		if (sk) {
 			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_data_ready(sk, skb->len);
+				sk->sk_data_ready(sk);
 			sock_put(sk);
 		} else {
 free:
@@ -1271,7 +1271,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
 
 		if (parent) {
 			bt_accept_unlink(sk);
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 		} else {
 			sk->sk_state_change(sk);
 		}
@@ -1327,7 +1327,7 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
 	sk->sk_state_change(sk);
 
 	if (parent)
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 
 	release_sock(sk);
 }
@@ -1340,7 +1340,7 @@ static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
 
 	parent = bt_sk(sk)->parent;
 	if (parent)
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 
 	release_sock(sk);
 }
@@ -186,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk)
 	rfcomm_schedule();
 }
 
-static void rfcomm_l2data_ready(struct sock *sk, int bytes)
+static void rfcomm_l2data_ready(struct sock *sk)
 {
-	BT_DBG("%p bytes %d", sk, bytes);
+	BT_DBG("%p", sk);
 	rfcomm_schedule();
 }
 
@@ -54,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
 
 	atomic_add(skb->len, &sk->sk_rmem_alloc);
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		rfcomm_dlc_throttle(d);
@@ -84,7 +84,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 			sock_set_flag(sk, SOCK_ZAPPED);
 			bt_accept_unlink(sk);
 		}
-		parent->sk_data_ready(parent, 0);
+		parent->sk_data_ready(parent);
 	} else {
 		if (d->state == BT_CONNECTED)
 			rfcomm_session_getaddr(d->session,
@@ -1024,7 +1024,7 @@ static void sco_conn_ready(struct sco_conn *conn)
 		sk->sk_state = BT_CONNECTED;
 
 		/* Wake up parent */
-		parent->sk_data_ready(parent, 1);
+		parent->sk_data_ready(parent);
 
 		bh_unlock_sock(parent);
 
@@ -124,7 +124,6 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
 static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
-	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -153,14 +152,13 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
-	skb_len = skb->len;
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
 
	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
	else
		kfree_skb(skb);
	return 0;
@@ -383,7 +383,7 @@ static void con_sock_state_closed(struct ceph_connection *con)
  */
 
 /* data available on socket, or listen socket received a connect */
-static void ceph_sock_data_ready(struct sock *sk, int count_unused)
+static void ceph_sock_data_ready(struct sock *sk)
 {
 	struct ceph_connection *con = sk->sk_user_data;
 	if (atomic_read(&con->msgr->stopping)) {
@@ -3458,8 +3458,6 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int len = skb->len;
-
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned int)sk->sk_rcvbuf)
 		return -ENOMEM;
@@ -3474,7 +3472,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, len);
+		sk->sk_data_ready(sk);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
@@ -428,7 +428,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
@@ -2196,7 +2196,7 @@ static void sock_def_error_report(struct sock *sk)
 	rcu_read_unlock();
 }
 
-static void sock_def_readable(struct sock *sk, int len)
+static void sock_def_readable(struct sock *sk)
 {
 	struct socket_wq *wq;
 
@@ -28,7 +28,7 @@ static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
 	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	skb_set_owner_r(skb, sk);
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void dccp_fin(struct sock *sk, struct sk_buff *skb)
@@ -237,7 +237,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
 
 		/* Wakeup parent, send SIGIO */
 		if (state == DCCP_RESPOND && child->sk_state != state)
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 	} else {
 		/* Alas, it is possible again, because we do lookup
 		 * in main socket hash table and lock on listening
@@ -585,7 +585,6 @@ out:
 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
 {
 	int err;
-	int skb_len;
 
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
@@ -600,12 +599,11 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
 	if (err)
 		goto out;
 
-	skb_len = skb->len;
 	skb_set_owner_r(skb, sk);
 	skb_queue_tail(queue, skb);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
+		sk->sk_data_ready(sk);
out:
 	return err;
 }
@@ -4413,7 +4413,7 @@ queue_and_out:
 		if (eaten > 0)
 			kfree_skb_partial(skb, fragstolen);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		return;
 	}
 
@@ -4914,7 +4914,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
 				BUG();
 			tp->urg_data = TCP_URG_VALID | tmp;
 			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_data_ready(sk, 0);
+				sk->sk_data_ready(sk);
 		}
 	}
 }
@@ -5000,11 +5000,11 @@ static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
 		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
 			tp->ucopy.wakeup = 1;
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		}
 	} else if (chunk > 0) {
 		tp->ucopy.wakeup = 1;
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	}
out:
 	return copied_early;
@@ -5275,7 +5275,7 @@ no_ack:
#endif
 			if (eaten)
 				kfree_skb_partial(skb, fragstolen);
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 			return;
 		}
 	}
@@ -1434,7 +1434,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		tp->syn_data_acked = 1;
 	}
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	bh_unlock_sock(child);
 	sock_put(child);
 	WARN_ON(req->sk == NULL);
@@ -745,7 +745,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 					    skb->len);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
-			parent->sk_data_ready(parent, 0);
+			parent->sk_data_ready(parent);
 	} else {
 		/* Alas, it is possible again, because we do lookup
 		 * in main socket hash table and lock on listening
@@ -1757,7 +1757,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
 	/* Wake up accept */
 	nsk->sk_state = IUCV_CONNECTED;
-	sk->sk_data_ready(sk, 1);
+	sk->sk_data_ready(sk);
 	err = 0;
fail:
 	bh_unlock_sock(sk);
@@ -1968,7 +1968,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
 	if (!err) {
 		iucv_accept_enqueue(sk, nsk);
 		nsk->sk_state = IUCV_CONNECTED;
-		sk->sk_data_ready(sk, 1);
+		sk->sk_data_ready(sk);
 	} else
 		iucv_sock_kill(nsk);
 	bh_unlock_sock(sk);
@@ -205,7 +205,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
 		skb_set_owner_r(*skb2, sk);
 		skb_queue_tail(&sk->sk_receive_queue, *skb2);
-		sk->sk_data_ready(sk, (*skb2)->len);
+		sk->sk_data_ready(sk);
 		*skb2 = NULL;
 		err = 0;
 	}
@@ -1653,7 +1653,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 	else
#endif /* CONFIG_NETLINK_MMAP */
 		skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, len);
+	sk->sk_data_ready(sk);
 	return len;
 }
 
@@ -2394,7 +2394,7 @@ out:
 	return err ? : copied;
 }
 
-static void netlink_data_ready(struct sock *sk, int len)
+static void netlink_data_ready(struct sock *sk)
 {
 	BUG();
 }
@@ -1011,7 +1011,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
 		skb_queue_head(&sk->sk_receive_queue, skb);
 
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, skb->len);
+			sk->sk_data_ready(sk);
 
 		bh_unlock_sock(sk);
 
@@ -976,7 +976,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
 	new_sk->sk_state = LLCP_CONNECTED;
 
 	/* Wake the listening processes */
-	parent->sk_data_ready(parent, 0);
+	parent->sk_data_ready(parent);
 
 	/* Send CC */
 	nfc_llcp_send_cc(new_sock);
@@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	skb->dropcount = atomic_read(&sk->sk_drops);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
-	sk->sk_data_ready(sk, skb->len);
+	sk->sk_data_ready(sk);
 	return 0;
 
drop_n_acct:
@@ -2054,7 +2054,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	else
 		prb_clear_blk_fill_status(&po->rx_ring);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 
drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {
@@ -2069,7 +2069,7 @@ ring_is_full:
 	po->stats.stats1.tp_drops++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	kfree_skb(copy_skb);
 	goto drop_n_restore;
 }
@@ -37,7 +37,7 @@
 struct gprs_dev {
 	struct sock *sk;
 	void (*old_state_change)(struct sock *);
-	void (*old_data_ready)(struct sock *, int);
+	void (*old_data_ready)(struct sock *);
 	void (*old_write_space)(struct sock *);
 
 	struct net_device *dev;
@@ -146,7 +146,7 @@ drop:
 	return err;
 }
 
-static void gprs_data_ready(struct sock *sk, int len)
+static void gprs_data_ready(struct sock *sk)
 {
 	struct gprs_dev *gp = sk->sk_user_data;
 	struct sk_buff *skb;
@@ -462,10 +462,9 @@ out:
queue:
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	err = skb->len;
 	skb_queue_tail(queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, err);
+		sk->sk_data_ready(sk);
 	return NET_RX_SUCCESS;
 }
 
@@ -587,10 +586,9 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
 		pn->rx_credits--;
 		skb->dev = NULL;
 		skb_set_owner_r(skb, sk);
-		err = skb->len;
 		skb_queue_tail(&sk->sk_receive_queue, skb);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, err);
+			sk->sk_data_ready(sk);
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_CONNECT_RESP:
@@ -698,7 +696,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 		skb_queue_head(&sk->sk_receive_queue, skb);
 		sk_acceptq_added(sk);
 		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, 0);
+			sk->sk_data_ready(sk);
 		return NET_RX_SUCCESS;
 
 	case PNS_PEP_DISCONNECT_REQ:
@@ -61,12 +61,12 @@ void rds_tcp_state_change(struct sock *sk);
 /* tcp_listen.c */
 int rds_tcp_listen_init(void);
 void rds_tcp_listen_stop(void);
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
+void rds_tcp_listen_data_ready(struct sock *sk);
 
 /* tcp_recv.c */
 int rds_tcp_recv_init(void);
 void rds_tcp_recv_exit(void);
-void rds_tcp_data_ready(struct sock *sk, int bytes);
+void rds_tcp_data_ready(struct sock *sk);
 int rds_tcp_recv(struct rds_connection *conn);
 void rds_tcp_inc_free(struct rds_incoming *inc);
 int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
@@ -108,9 +108,9 @@ static void rds_tcp_accept_worker(struct work_struct *work)
 		cond_resched();
 }
 
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
+void rds_tcp_listen_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 
 	rdsdebug("listen data ready sk %p\n", sk);
 
@@ -132,7 +132,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
out:
 	read_unlock(&sk->sk_callback_lock);
-	ready(sk, bytes);
+	ready(sk);
 }
 
 int rds_tcp_listen_init(void)
@@ -314,13 +314,13 @@ int rds_tcp_recv(struct rds_connection *conn)
 	return ret;
 }
 
-void rds_tcp_data_ready(struct sock *sk, int bytes)
+void rds_tcp_data_ready(struct sock *sk)
 {
-	void (*ready)(struct sock *sk, int bytes);
+	void (*ready)(struct sock *sk);
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
+	rdsdebug("data ready sk %p\n", sk);
 
 	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
@@ -337,7 +337,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
 	read_unlock(&sk->sk_callback_lock);
-	ready(sk, bytes);
+	ready(sk);
 }
 
 int rds_tcp_recv_init(void)
@@ -1041,7 +1041,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
 	rose_start_heartbeat(make);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 
 	return 1;
 }
@@ -113,7 +113,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 			spin_unlock_bh(&sk->sk_receive_queue.lock);
 
 			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_data_ready(sk, skb_len);
+				sk->sk_data_ready(sk);
 		}
 		skb = NULL;
 	} else {
@@ -632,14 +632,14 @@ cant_find_conn:
 * handle data received on the local endpoint
 * - may be called in interrupt context
 */
-void rxrpc_data_ready(struct sock *sk, int count)
+void rxrpc_data_ready(struct sock *sk)
 {
 	struct rxrpc_skb_priv *sp;
 	struct rxrpc_local *local;
 	struct sk_buff *skb;
 	int ret;
 
-	_enter("%p, %d", sk, count);
+	_enter("%p", sk);
 
 	ASSERT(!irqs_disabled());
 
@@ -518,7 +518,7 @@ void rxrpc_UDP_error_handler(struct work_struct *);
 */
 extern const char *rxrpc_pkts[];
 
-void rxrpc_data_ready(struct sock *, int);
+void rxrpc_data_ready(struct sock *);
 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
@@ -6745,7 +6745,7 @@ do_nonblock:
 	goto out;
 }
 
-void sctp_data_ready(struct sock *sk, int len)
+void sctp_data_ready(struct sock *sk)
 {
 	struct socket_wq *wq;
 
@@ -259,7 +259,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 		sctp_ulpq_clear_pd(ulpq);
 
 	if (queue == &sk->sk_receive_queue)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	return 1;
 
out_free:
@@ -1135,5 +1135,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 
 	/* If there is data waiting, send it up the socket now. */
 	if (sctp_ulpq_clear_pd(ulpq) || ev)
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 }
@@ -60,7 +60,7 @@
 
 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int flags);
-static void svc_udp_data_ready(struct sock *, int);
+static void svc_udp_data_ready(struct sock *);
 static int svc_udp_recvfrom(struct svc_rqst *);
 static int svc_udp_sendto(struct svc_rqst *);
 static void svc_sock_detach(struct svc_xprt *);
@@ -403,14 +403,14 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 /*
 * INET callback when data has been received on the socket.
 */
-static void svc_udp_data_ready(struct sock *sk, int count)
+static void svc_udp_data_ready(struct sock *sk)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
-		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
-			svsk, sk, count,
+		dprintk("svc: socket %p(inet %p), busy=%d\n",
+			svsk, sk,
			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
@@ -731,7 +731,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
-static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
+static void svc_tcp_listen_data_ready(struct sock *sk)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq;
@@ -783,7 +783,7 @@ static void svc_tcp_state_change(struct sock *sk)
 		wake_up_interruptible_all(wq);
 }
 
-static void svc_tcp_data_ready(struct sock *sk, int count)
+static void svc_tcp_data_ready(struct sock *sk)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
 	wait_queue_head_t *wq = sk_sleep(sk);
@@ -254,7 +254,7 @@ struct sock_xprt {
 	/*
 	 * Saved socket callback addresses
	 */
-	void (*old_data_ready)(struct sock *, int);
+	void (*old_data_ready)(struct sock *);
	void (*old_state_change)(struct sock *);
	void (*old_write_space)(struct sock *);
	void (*old_error_report)(struct sock *);
@@ -946,7 +946,7 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 *
 * Currently this assumes we can read the whole reply in a single gulp.
 */
-static void xs_local_data_ready(struct sock *sk, int len)
+static void xs_local_data_ready(struct sock *sk)
 {
 	struct rpc_task *task;
 	struct rpc_xprt *xprt;
@@ -1009,7 +1009,7 @@ static void xs_local_data_ready(struct sock *sk, int len)
 * @len: how much data to read
 *
 */
-static void xs_udp_data_ready(struct sock *sk, int len)
+static void xs_udp_data_ready(struct sock *sk)
 {
 	struct rpc_task *task;
 	struct rpc_xprt *xprt;
@@ -1432,7 +1432,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
 * @bytes: how much data to read
 *
 */
-static void xs_tcp_data_ready(struct sock *sk, int bytes)
+static void xs_tcp_data_ready(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 	read_descriptor_t rd_desc;
@@ -119,7 +119,7 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 	return con;
 }
 
-static void sock_data_ready(struct sock *sk, int unused)
+static void sock_data_ready(struct sock *sk)
 {
 	struct tipc_conn *con;
 
@@ -297,7 +297,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
 	newcon->usr_data = s->tipc_conn_new(newcon->conid);
 
 	/* Wake up receive process in case of 'SYN+' message */
-	newsock->sk->sk_data_ready(newsock->sk, 0);
+	newsock->sk->sk_data_ready(newsock->sk);
 	return ret;
 }
 
@@ -45,7 +45,7 @@
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
 
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
-static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
@@ -1248,7 +1248,7 @@ static void tipc_write_space(struct sock *sk)
 * @sk: socket
 * @len: the length of messages
 */
-static void tipc_data_ready(struct sock *sk, int len)
+static void tipc_data_ready(struct sock *sk)
 {
 	struct socket_wq *wq;
 
@@ -1410,7 +1410,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
 	skb_set_owner_r(buf, sk);
 
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 	return TIPC_OK;
 }
 
@@ -1217,7 +1217,7 @@ restart:
 	__skb_queue_tail(&other->sk_receive_queue, skb);
 	spin_unlock(&other->sk_receive_queue.lock);
 	unix_state_unlock(other);
-	other->sk_data_ready(other, 0);
+	other->sk_data_ready(other);
 	sock_put(other);
 	return 0;
 
@@ -1600,7 +1600,7 @@ restart:
 	if (max_level > unix_sk(other)->recursion_level)
 		unix_sk(other)->recursion_level = max_level;
 	unix_state_unlock(other);
-	other->sk_data_ready(other, len);
+	other->sk_data_ready(other);
 	sock_put(other);
 	scm_destroy(siocb->scm);
 	return len;
@@ -1706,7 +1706,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		if (max_level > unix_sk(other)->recursion_level)
 			unix_sk(other)->recursion_level = max_level;
 		unix_state_unlock(other);
-		other->sk_data_ready(other, size);
+		other->sk_data_ready(other);
 		sent += size;
 	}
 
@@ -315,7 +315,7 @@ vmci_transport_handle_wrote(struct sock *sk,
 	struct vsock_sock *vsk = vsock_sk(sk);
 	PKT_FIELD(vsk, sent_waiting_read) = false;
#endif
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
@@ -92,7 +92,7 @@ vmci_transport_handle_wrote(struct sock *sk,
			    bool bottom_half,
			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
 {
-	sk->sk_data_ready(sk, 0);
+	sk->sk_data_ready(sk);
 }
 
 static void vsock_block_update_write_window(struct sock *sk)
@@ -290,7 +290,7 @@ vmci_transport_notify_pkt_recv_post_dequeue(
 		/* See the comment in
 		 * vmci_transport_notify_pkt_send_post_enqueue().
		 */
-		sk->sk_data_ready(sk, 0);
+		sk->sk_data_ready(sk);
 	}
 
 	return err;
@@ -1064,7 +1064,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
 	x25_start_heartbeat(make);
 
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk);
 	rc = 1;
 	sock_put(sk);
out:
@@ -79,7 +79,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
 	skb_set_owner_r(skbn, sk);
 	skb_queue_tail(&sk->sk_receive_queue, skbn);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skbn->len);
+		sk->sk_data_ready(sk);
 
 	return 0;
 }