Merge branch 'mptcp-re-enable-sndbuf-autotune'
Paolo Abeni says:

====================
mptcp: re-enable sndbuf autotune

The send buffer autotuning was unintentionally disabled as a side effect
of the recent workqueue removal refactor. These patches re-enable it, with
some extra care: with autotuning enabled and a large send buffer we need a
more accurate packet scheduler to use the available subflow bandwidth
efficiently, especially when the subflows have different capacities.

The first patch cleans up subflow socket handling, making the actual
re-enable (patch 2) simpler. Patches 3 and 4 improve the packet scheduler,
to better cope with non-trivial scenarios and a large send buffer.

Finally, patch 5 adds and uses some infrastructure to avoid workqueue
usage for the packet scheduler operations introduced by the previous
patches.
====================

Link: https://lore.kernel.org/r/cover.1611153172.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
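For illustration only (not part of the series): the reworked scheduler prefers,
among the subflows that still have free send space and a non-zero TCP send
window, the one whose already-queued data should drain first, i.e. the lowest
queued-bytes / pacing-rate ratio, and the burst kept on the last-used subflow is
now capped by that subflow's TCP send window. The sketch below is a simplified,
userspace-style rendition of that selection criterion; the structure and
function names are made up for the example and do not exist in the kernel tree.

	#include <stdint.h>
	#include <stddef.h>

	struct subflow_snapshot {
		uint64_t wmem_queued;  /* bytes already queued on this subflow */
		uint64_t pacing_rate;  /* bytes per second the subflow can pace out */
		uint32_t snd_wnd;      /* TCP send window, caps the burst size */
		int      free_space;   /* non-zero if the subflow can accept more data */
	};

	/* Return the index of the preferred subflow, or -1 if none can send. */
	static int pick_subflow(const struct subflow_snapshot *sf, size_t n)
	{
		uint64_t best_ratio = UINT64_MAX;
		int best = -1;

		for (size_t i = 0; i < n; i++) {
			uint64_t ratio;

			if (!sf[i].free_space || !sf[i].snd_wnd || !sf[i].pacing_rate)
				continue;

			/* scale before dividing, mirroring the fixed-point math
			 * used by the kernel scheduler (queued bytes << 32 / rate)
			 */
			ratio = (sf[i].wmem_queued << 32) / sf[i].pacing_rate;
			if (ratio < best_ratio) {
				best_ratio = ratio;
				best = (int)i;
			}
		}
		return best;
	}

mptcp_subflow_get_send() in the diff below applies roughly the same idea on top
of the real subflow context and READ_ONCE() accessors.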
@@ -45,6 +45,9 @@ static struct percpu_counter mptcp_sockets_allocated;
static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
@@ -114,11 +117,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;

	/* accept() will wait on first subflow sk_wq, and we always wakes up
	 * via msk->sk_socket
	 */
	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
	mptcp_sock_graft(msk->first, sk->sk_socket);

	return 0;
}
@@ -734,10 +733,14 @@ wake:

void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_for_each_entry(subflow, &msk->join_list, node)
		mptcp_propagate_sndbuf((struct sock *)msk, mptcp_subflow_tcp_sock(subflow));
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}
@@ -1037,13 +1040,6 @@ out:
			__mptcp_update_wmem(sk);
			sk_mem_reclaim_partial(sk);
		}

		if (sk_stream_is_writeable(sk)) {
			/* pairs with memory barrier in mptcp_poll */
			smp_mb();
			if (test_and_clear_bit(MPTCP_NOSPACE, &msk->flags))
				sk_stream_write_space(sk);
		}
	}

	if (snd_una == READ_ONCE(msk->snd_nxt)) {
@@ -1362,8 +1358,7 @@ struct subflow_send_info {
	u64 ratio;
};

static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
					   u32 *sndbuf)
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct subflow_send_info send_info[2];
	struct mptcp_subflow_context *subflow;
@@ -1374,24 +1369,17 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,

	sock_owned_by_me((struct sock *)msk);

	*sndbuf = 0;
	if (__mptcp_check_fallback(msk)) {
		if (!msk->first)
			return NULL;
		*sndbuf = msk->first->sk_sndbuf;
		return sk_stream_memory_free(msk->first) ? msk->first : NULL;
	}

	/* re-use last subflow, if the burst allow that */
	if (msk->last_snd && msk->snd_burst > 0 &&
	    sk_stream_memory_free(msk->last_snd) &&
	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
		mptcp_for_each_subflow(msk, subflow) {
			ssk = mptcp_subflow_tcp_sock(subflow);
			*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
		}
	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd)))
		return msk->last_snd;
	}

	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < 2; ++i) {
@@ -1404,8 +1392,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
			continue;

		nr_active += !subflow->backup;
		*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
		if (!sk_stream_memory_free(subflow->tcp_sock))
		if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
			continue;

		pace = READ_ONCE(ssk->sk_pacing_rate);
@@ -1431,9 +1418,10 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
	if (send_info[0].ssk) {
		msk->last_snd = send_info[0].ssk;
		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
				       sk_stream_wspace(msk->last_snd));
				       tcp_sk(msk->last_snd)->snd_wnd);
		return msk->last_snd;
	}

	return NULL;
}

@@ -1454,7 +1442,6 @@ static void mptcp_push_pending(struct sock *sk, unsigned int flags)
	};
	struct mptcp_data_frag *dfrag;
	int len, copied = 0;
	u32 sndbuf;

	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
@@ -1465,12 +1452,7 @@ static void mptcp_push_pending(struct sock *sk, unsigned int flags)

			prev_ssk = ssk;
			__mptcp_flush_join_list(msk);
			ssk = mptcp_subflow_get_send(msk, &sndbuf);

			/* do auto tuning */
			if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
			    sndbuf > READ_ONCE(sk->sk_sndbuf))
				WRITE_ONCE(sk->sk_sndbuf, sndbuf);
			ssk = mptcp_subflow_get_send(msk);

			/* try to keep the subflow socket lock across
			 * consecutive xmit on the same socket
@@ -1527,7 +1509,9 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info;
	struct mptcp_data_frag *dfrag;
	struct sock *xmit_ssk;
	int len, copied = 0;
	bool first = true;

	info.flags = 0;
	while ((dfrag = mptcp_send_head(sk))) {
@@ -1537,10 +1521,17 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
		while (len > 0) {
			int ret = 0;

			/* do auto tuning */
			if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
			    ssk->sk_sndbuf > READ_ONCE(sk->sk_sndbuf))
				WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
			/* the caller already invoked the packet scheduler,
			 * check for a different subflow usage only after
			 * spooling the first chunk of data
			 */
			xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
			if (!xmit_ssk)
				goto out;
			if (xmit_ssk != ssk) {
				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
				goto out;
			}

			if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
				__mptcp_update_wmem(sk);
@@ -1560,6 +1551,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
			msk->tx_pending_data -= ret;
			copied += ret;
			len -= ret;
			first = false;
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}
@@ -1579,6 +1571,15 @@ out:
	}
}

static void mptcp_set_nospace(struct sock *sk)
{
	/* enable autotune */
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	/* will be cleared on avail space */
	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1680,7 +1681,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
		continue;

wait_for_memory:
		set_bit(MPTCP_NOSPACE, &msk->flags);
		mptcp_set_nospace(sk);
		mptcp_push_pending(sk, msg->msg_flags);
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
@@ -2116,9 +2117,6 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		       struct mptcp_subflow_context *subflow)
{
	bool dispose_socket = false;
	struct socket *sock;

	list_del(&subflow->node);

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
@@ -2126,11 +2124,8 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
	/* if we are invoked by the msk cleanup code, the subflow is
	 * already orphaned
	 */
	sock = ssk->sk_socket;
	if (sock) {
		dispose_socket = sock != sk->sk_socket;
	if (ssk->sk_socket)
		sock_orphan(ssk);
	}

	subflow->disposable = 1;

@@ -2148,8 +2143,6 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		__sock_put(ssk);
	}
	release_sock(ssk);
	if (dispose_socket)
		iput(SOCK_INODE(sock));

	sock_put(ssk);
}
@@ -2536,6 +2529,12 @@ static void __mptcp_destroy_sock(struct sock *sk)

	pr_debug("msk=%p", msk);

	/* dispose the ancillatory tcp socket, if any */
	if (msk->subflow) {
		iput(SOCK_INODE(msk->subflow));
		msk->subflow = NULL;
	}

	/* be sure to always acquire the join list lock, to sync vs
	 * mptcp_finish_join().
	 */
@@ -2586,20 +2585,10 @@ cleanup:
	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
	list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow, dispose_socket;
		struct socket *sock;
		bool slow = lock_sock_fast(ssk);

		slow = lock_sock_fast(ssk);
		sock = ssk->sk_socket;
		dispose_socket = sock && sock != sk->sk_socket;
		sock_orphan(ssk);
		unlock_sock_fast(ssk, slow);

		/* for the outgoing subflows we additionally need to free
		 * the associated socket
		 */
		if (dispose_socket)
			iput(SOCK_INODE(sock));
	}
	sock_orphan(sk);

@@ -2928,10 +2917,16 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
	if (!mptcp_send_head(sk))
		return;

	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk);
	else
	if (!sock_owned_by_user(sk)) {
		struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));

		if (xmit_ssk == ssk)
			__mptcp_subflow_push_pending(sk, ssk);
		else if (xmit_ssk)
			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
	} else {
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	}
}

#define MPTCP_DEFERRED_ALL (TCPF_WRITE_TIMER_DEFERRED)
@@ -2979,6 +2974,20 @@ static void mptcp_release_cb(struct sock *sk)
	}
}

void mptcp_subflow_process_delegated(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk);
	else
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
	mptcp_subflow_delegated_done(subflow);
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
@@ -3041,7 +3050,7 @@ void mptcp_finish_connect(struct sock *ssk)
	mptcp_rcv_space_init(msk, ssk);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
@@ -3284,6 +3293,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,

		mptcp_copy_inaddrs(newsk, msk->first);
		mptcp_rcv_space_init(msk, msk->first);
		mptcp_propagate_sndbuf(newsk, msk->first);

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
@@ -3324,7 +3334,7 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	set_bit(MPTCP_NOSPACE, &msk->flags);
	mptcp_set_nospace(sk);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;
@@ -3388,13 +3398,58 @@ static struct inet_protosw mptcp_protosw = {
	.flags = INET_PROTOSW_ICSK,
};

static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk) &&
		    mptcp_subflow_has_delegated_action(subflow))
			mptcp_subflow_process_delegated(ssk);
		/* ... elsewhere tcp_release_cb_override already processed
		 * the action or will do at next release_sock().
		 * In both case must dequeue the subflow here - on the same
		 * CPU that scheduled it.
		 */
		bh_unlock_sock(ssk);
		sock_put(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	init_dummy_netdev(&mptcp_napi_dev);
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

@@ -378,6 +378,15 @@ enum mptcp_data_avail {
	MPTCP_SUBFLOW_OOO_DATA
};

struct mptcp_delegated_action {
	struct napi_struct napi;
	struct list_head head;
};

DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);

#define MPTCP_DELEGATE_SEND 0

/* MPTCP subflow context */
struct mptcp_subflow_context {
	struct list_head node;/* conn_list of subflows */
@@ -415,6 +424,9 @@ struct mptcp_subflow_context {
	u8 local_id;
	u8 remote_id;

	long delegated_status;
	struct list_head delegated_node; /* link into delegated_action, protected by local BH */

	struct sock *tcp_sock; /* tcp sk backpointer */
	struct sock *conn; /* parent mptcp_sock */
	const struct inet_connection_sock_af_ops *icsk_af_ops;
@@ -463,6 +475,61 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
	spin_unlock_bh(&msk->join_list_lock);
}

void mptcp_subflow_process_delegated(struct sock *ssk);

static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
{
	struct mptcp_delegated_action *delegated;
	bool schedule;

	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
	 * ensures the below list check sees list updates done prior to status
	 * bit changes
	 */
	if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
		/* still on delegated list from previous scheduling */
		if (!list_empty(&subflow->delegated_node))
			return;

		/* the caller held the subflow bh socket lock */
		lockdep_assert_in_softirq();

		delegated = this_cpu_ptr(&mptcp_delegated_actions);
		schedule = list_empty(&delegated->head);
		list_add_tail(&subflow->delegated_node, &delegated->head);
		sock_hold(mptcp_subflow_tcp_sock(subflow));
		if (schedule)
			napi_schedule(&delegated->napi);
	}
}

static inline struct mptcp_subflow_context *
mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
{
	struct mptcp_subflow_context *ret;

	if (list_empty(&delegated->head))
		return NULL;

	ret = list_first_entry(&delegated->head, struct mptcp_subflow_context, delegated_node);
	list_del_init(&ret->delegated_node);
	return ret;
}

static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
	return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
{
	/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
	 * touching the status bit
	 */
	smp_wmb();
	clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

int mptcp_is_enabled(struct net *net);
unsigned int mptcp_get_add_addr_timeout(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
@@ -473,6 +540,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		       struct mptcp_subflow_context *subflow);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);

/* called with sk socket lock held */
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
@@ -521,6 +589,25 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
	       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}

static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
{
	if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
		return false;

	WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
	return true;
}

static inline void mptcp_write_space(struct sock *sk)
{
	if (sk_stream_is_writeable(sk)) {
		/* pairs with memory barrier in mptcp_poll */
		smp_mb();
		if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
			sk_stream_write_space(sk);
	}
}

void mptcp_destroy_common(struct mptcp_sock *msk);

void __init mptcp_token_init(void);

@@ -18,12 +18,15 @@
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
@@ -343,6 +346,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
@@ -427,6 +431,7 @@ drop:
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
@@ -508,6 +513,8 @@ static void subflow_ulp_fallback(struct sock *sk,
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
@@ -681,6 +688,7 @@ dispose_child:
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
@@ -1040,7 +1048,10 @@ static void subflow_data_ready(struct sock *sk)

static void subflow_write_space(struct sock *ssk)
{
	/* we take action in __mptcp_clean_una() */
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static struct inet_connection_sock_af_ops *
@@ -1159,6 +1170,9 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	return err;

failed_unlink:
@@ -1196,6 +1210,25 @@ static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
@@ -1251,6 +1284,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}
@@ -1267,6 +1301,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

@@ -1299,6 +1334,7 @@ static void subflow_state_change(struct sock *sk)
	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
@@ -1378,6 +1414,7 @@ static void subflow_ulp_release(struct sock *ssk)
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}
@@ -1431,6 +1468,16 @@ static void subflow_ulp_clone(const struct request_sock *req,
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name = "mptcp",
	.owner = THIS_MODULE,
@@ -1471,6 +1518,9 @@ void __init mptcp_subflow_init(void)
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
@@ -1486,6 +1536,9 @@ void __init mptcp_subflow_init(void)
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);