mptcp: do not reset MP_CAPABLE subflow on mapping errors

When a mapping-related error occurs, we close the main
MPC subflow with a RST. We should instead fall back gracefully
to TCP, and do the reset only for MPJ subflows.

Fixes: d22f4988ff ("mptcp: process MP_CAPABLE data option")
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/192
Reported-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Paolo Abeni, 2021-05-27 16:31:39 -07:00
Committed by: Jakub Kicinski
Parent: 06f9a435b3
Commit: dea2b1ea9c
1 changed file with 32 additions and 30 deletions

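In short, the diff below changes the error path of subflow_check_data_avail() (net/mptcp/subflow.c) so that a mapping error resets only MP_JOIN (or fully established) subflows, while the initial MP_CAPABLE subflow falls back to plain TCP, per RFC 8684 section 3.7. The standalone sketch that follows models just that decision; the enum and helper names are invented for illustration, and only the mp_join/fully_established flags mirror the fields tested in the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative userspace model only, not kernel code: on a data-mapping
 * error, an MP_JOIN (or fully established) subflow is reset, while the
 * initial MP_CAPABLE subflow falls back to plain TCP (RFC 8684, 3.7).
 */
enum mapping_error_action {
        FALLBACK_TO_TCP,        /* MPC subflow: keep the connection, drop MPTCP */
        RESET_SUBFLOW,          /* MPJ subflow: fatal, send a RST (MPTCP_RST_EMPTCP) */
};

static enum mapping_error_action on_mapping_error(bool mp_join,
                                                  bool fully_established)
{
        if (mp_join || fully_established)
                return RESET_SUBFLOW;
        return FALLBACK_TO_TCP;
}

int main(void)
{
        /* initial MP_CAPABLE subflow, not yet fully established: fallback */
        printf("MPC subflow -> %s\n",
               on_mapping_error(false, false) == RESET_SUBFLOW ? "reset" : "fallback");
        /* additional MP_JOIN subflow: only that subflow is reset */
        printf("MPJ subflow -> %s\n",
               on_mapping_error(true, false) == RESET_SUBFLOW ? "reset" : "fallback");
        return 0;
}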

@@ -1011,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 
                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
-               if (status == MAPPING_INVALID) {
-                       ssk->sk_err = EBADMSG;
-                       goto fatal;
-               }
-               if (status == MAPPING_DUMMY) {
-                       __mptcp_do_fallback(msk);
-                       skb = skb_peek(&ssk->sk_receive_queue);
-                       subflow->map_valid = 1;
-                       subflow->map_seq = READ_ONCE(msk->ack_seq);
-                       subflow->map_data_len = skb->len;
-                       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
-                                                  subflow->ssn_offset;
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       return true;
-               }
+               if (unlikely(status == MAPPING_INVALID))
+                       goto fallback;
+
+               if (unlikely(status == MAPPING_DUMMY))
+                       goto fallback;
 
                if (status != MAPPING_OK)
                        goto no_data;
@@ -1038,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
-                       if (!subflow->mpc_map) {
-                               ssk->sk_err = EBADMSG;
-                               goto fatal;
-                       }
+                       if (!subflow->mpc_map)
+                               goto fallback;
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
@@ -1069,17 +1057,31 @@ static bool subflow_check_data_avail(struct sock *ssk)
 no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;
-fatal:
-       /* fatal protocol error, close the socket */
-       /* This barrier is coupled with smp_rmb() in tcp_poll() */
-       smp_wmb();
-       ssk->sk_error_report(ssk);
-       tcp_set_state(ssk, TCP_CLOSE);
-       subflow->reset_transient = 0;
-       subflow->reset_reason = MPTCP_RST_EMPTCP;
-       tcp_send_active_reset(ssk, GFP_ATOMIC);
-       subflow->data_avail = 0;
-       return false;
+
+fallback:
+       /* RFC 8684 section 3.7. */
+       if (subflow->mp_join || subflow->fully_established) {
+               /* fatal protocol error, close the socket.
+                * subflow_error_report() will introduce the appropriate barriers
+                */
+               ssk->sk_err = EBADMSG;
+               ssk->sk_error_report(ssk);
+               tcp_set_state(ssk, TCP_CLOSE);
+               subflow->reset_transient = 0;
+               subflow->reset_reason = MPTCP_RST_EMPTCP;
+               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               subflow->data_avail = 0;
+               return false;
+       }
+
+       __mptcp_do_fallback(msk);
+       skb = skb_peek(&ssk->sk_receive_queue);
+       subflow->map_valid = 1;
+       subflow->map_seq = READ_ONCE(msk->ack_seq);
+       subflow->map_data_len = skb->len;
+       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+       return true;
 }
 
 bool mptcp_subflow_data_available(struct sock *sk)