Merge branch 'mptcp-fixes-and-maintainer-email-update-for-v6-6'
Mat Martineau says:

====================
mptcp: Fixes and maintainer email update for v6.6

Patch 1 addresses a race condition in the MPTCP "delegated actions"
infrastructure. Affects v5.19 and later.

Patch 2 removes an unnecessary restriction that did not allow
additional outgoing subflows using the local address of the initial
MPTCP subflow. Affects v5.16 and later.

Patch 3 updates Matthieu's email address.
====================

Link: https://lore.kernel.org/r/20231004-send-net-20231004-v1-0-28de4ac663ae@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -377,6 +377,7 @@ Matthew Wilcox <willy@infradead.org> <willy@debian.org>
 Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
+Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
 Matthieu CASTET <castet.matthieu@free.fr>
 Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14942,7 +14942,7 @@ K: macsec
 K: \bmdo_
 
 NETWORKING [MPTCP]
-M: Matthieu Baerts <matthieu.baerts@tessares.net>
+M: Matthieu Baerts <matttbe@kernel.org>
 M: Mat Martineau <martineau@kernel.org>
 L: netdev@vger.kernel.org
 L: mptcp@lists.linux.dev
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -307,12 +307,6 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
 		goto create_err;
 	}
 
-	if (addr_l.id == 0) {
-		NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
-		err = -EINVAL;
-		goto create_err;
-	}
-
 	err = mptcp_pm_parse_addr(raddr, info, &addr_r);
 	if (err < 0) {
 		NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
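In the userspace path manager, local address ID 0 denotes the address already in use by the initial subflow, so rejecting it prevented any additional outgoing subflow from that address. A stand-alone sketch of the behavioral change (plain C with illustrative names, not the kernel's netlink plumbing):

#include <stdio.h>

/* Hypothetical stand-in for the parsed netlink local-address attribute. */
struct addr_entry { unsigned char id; const char *ip; };

/* Old behaviour: id 0 (the initial subflow's address) was refused. */
static int sf_create_old(const struct addr_entry *addr_l)
{
	if (addr_l->id == 0)
		return -22; /* -EINVAL: "missing local addr id" */
	return 0;
}

/* New behaviour: id 0 is accepted like any other local address. */
static int sf_create_new(const struct addr_entry *addr_l)
{
	(void)addr_l;
	return 0;
}

int main(void)
{
	struct addr_entry initial = { .id = 0, .ip = "10.0.0.1" };

	printf("old: %d, new: %d\n", sf_create_old(&initial), sf_create_new(&initial));
	return 0;
}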
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3425,24 +3425,21 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
 	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
 }
 
-void mptcp_subflow_process_delegated(struct sock *ssk)
+void mptcp_subflow_process_delegated(struct sock *ssk, long status)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	struct sock *sk = subflow->conn;
 
-	if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+	if (status & BIT(MPTCP_DELEGATE_SEND)) {
 		mptcp_data_lock(sk);
 		if (!sock_owned_by_user(sk))
 			__mptcp_subflow_push_pending(sk, ssk, true);
 		else
 			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
 		mptcp_data_unlock(sk);
-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
 	}
-	if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+	if (status & BIT(MPTCP_DELEGATE_ACK))
 		schedule_3rdack_retransmission(ssk);
-		mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
-	}
 }
 
 static int mptcp_hash(struct sock *sk)
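The signature change is the heart of the race fix: previously the function re-read delegated_status with test_bit() and cleared each bit afterwards via mptcp_subflow_delegated_done(), so an action delegated between the test and the clear could be erased without ever running. Acting on a snapshot the caller has already atomically claimed closes that window. A stand-alone C11 sketch of the two patterns (illustrative names, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

enum { DELEGATE_SCHEDULED, DELEGATE_SEND, DELEGATE_ACK }; /* illustrative bit numbers */

static atomic_long delegated_status;

/* Old pattern: test a live bit, act, then clear. A request that lands
 * between the action and the clear is silently erased. */
static void process_live_bits(void)
{
	if (atomic_load(&delegated_status) & (1L << DELEGATE_SEND)) {
		puts("push pending data");
		/* a concurrent delegate() may set DELEGATE_SEND here... */
		atomic_fetch_and(&delegated_status, ~(1L << DELEGATE_SEND)); /* ...and this clear loses it */
	}
}

/* New pattern: act only on a snapshot the caller atomically claimed,
 * so nothing is cleared behind a concurrent producer's back. */
static void process_snapshot(long status)
{
	if (status & (1L << DELEGATE_SEND))
		puts("push pending data");
	if (status & (1L << DELEGATE_ACK))
		puts("schedule 3rd-ack retransmission");
}

int main(void)
{
	atomic_fetch_or(&delegated_status, 1L << DELEGATE_SEND);
	process_live_bits();

	atomic_fetch_or(&delegated_status, (1L << DELEGATE_SEND) | (1L << DELEGATE_ACK));
	process_snapshot(atomic_exchange(&delegated_status, 0)); /* like xchg() in mptcp_napi_poll() */
	return 0;
}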
@@ -3968,14 +3965,17 @@ static int mptcp_napi_poll(struct napi_struct *napi, int budget)
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
 		bh_lock_sock_nested(ssk);
-		if (!sock_owned_by_user(ssk) &&
-		    mptcp_subflow_has_delegated_action(subflow))
-			mptcp_subflow_process_delegated(ssk);
-		/* ... elsewhere tcp_release_cb_override already processed
-		 * the action or will do at next release_sock().
-		 * In both case must dequeue the subflow here - on the same
-		 * CPU that scheduled it.
-		 */
+		if (!sock_owned_by_user(ssk)) {
+			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
+		} else {
+			/* tcp_release_cb_override already processed
+			 * the action or will do at next release_sock().
+			 * In both case must dequeue the subflow here - on the same
+			 * CPU that scheduled it.
+			 */
+			smp_wmb();
+			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
+		}
 		bh_unlock_sock(ssk);
 		sock_put(ssk);
 	}
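Note the asymmetry between the two branches: when the subflow socket is not owned by user space, xchg() atomically fetches and zeroes the whole status word, claiming every pending bit in one step; when it is owned, only MPTCP_DELEGATE_SCHEDULED is cleared, leaving the action bits for tcp_release_cb_override() while re-arming mptcp_subflow_delegate() for the next round. A user-space analogue of the owned branch (C11, illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1L << (n))
#define DELEGATE_SCHEDULED 0	/* illustrative copies of the kernel values */
#define DELEGATE_SEND      1

static atomic_long delegated_status;

int main(void)
{
	/* Subflow queued with a send action pending: SCHEDULED | SEND. */
	atomic_store(&delegated_status, BIT(DELEGATE_SCHEDULED) | BIT(DELEGATE_SEND));

	/* Owned-by-user path: drop only SCHEDULED. SEND survives for the
	 * release_sock() callback, and a later delegate() can queue the
	 * subflow again because SCHEDULED now reads as clear. */
	atomic_fetch_and(&delegated_status, ~BIT(DELEGATE_SCHEDULED));

	printf("status now %#lx (send action still pending)\n",
	       (unsigned long)atomic_load(&delegated_status));
	return 0;
}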
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -444,9 +444,11 @@ struct mptcp_delegated_action {
 
 DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
 
-#define MPTCP_DELEGATE_SEND		0
-#define MPTCP_DELEGATE_ACK		1
+#define MPTCP_DELEGATE_SCHEDULED	0
+#define MPTCP_DELEGATE_SEND		1
+#define MPTCP_DELEGATE_ACK		2
 
+#define MPTCP_DELEGATE_ACTIONS_MASK	(~BIT(MPTCP_DELEGATE_SCHEDULED))
 /* MPTCP subflow context */
 struct mptcp_subflow_context {
 	struct	list_head node;/* conn_list of subflows */
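Bit 0 now records only list membership (is this subflow already on a per-CPU delegated list?), separate from which actions are pending, and MPTCP_DELEGATE_ACTIONS_MASK selects everything except that scheduling bit. A quick stand-alone check of the layout (plain C, assuming the usual BIT() definition):

#include <stdio.h>

#define BIT(n) (1UL << (n))

#define MPTCP_DELEGATE_SCHEDULED	0
#define MPTCP_DELEGATE_SEND		1
#define MPTCP_DELEGATE_ACK		2

#define MPTCP_DELEGATE_ACTIONS_MASK	(~BIT(MPTCP_DELEGATE_SCHEDULED))

int main(void)
{
	unsigned long status = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(MPTCP_DELEGATE_ACK);

	/* Splitting the word: the mask keeps the action bits and drops
	 * the list-membership bit. */
	printf("actions pending: %#lx\n", status & MPTCP_DELEGATE_ACTIONS_MASK); /* 0x4 */
	printf("on a delegated list: %s\n",
	       (status & BIT(MPTCP_DELEGATE_SCHEDULED)) ? "yes" : "no");
	return 0;
}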
@@ -564,23 +566,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
 	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
 }
 
-void mptcp_subflow_process_delegated(struct sock *ssk);
+void mptcp_subflow_process_delegated(struct sock *ssk, long actions);
 
 static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
 {
+	long old, set_bits = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(action);
 	struct mptcp_delegated_action *delegated;
 	bool schedule;
 
 	/* the caller held the subflow bh socket lock */
 	lockdep_assert_in_softirq();
 
-	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
-	 * ensures the below list check sees list updates done prior to status
-	 * bit changes
+	/* The implied barrier pairs with tcp_release_cb_override()
+	 * mptcp_napi_poll(), and ensures the below list check sees list
+	 * updates done prior to delegated status bits changes
 	 */
-	if (!test_and_set_bit(action, &subflow->delegated_status)) {
-		/* still on delegated list from previous scheduling */
-		if (!list_empty(&subflow->delegated_node))
+	old = set_mask_bits(&subflow->delegated_status, 0, set_bits);
+	if (!(old & BIT(MPTCP_DELEGATE_SCHEDULED))) {
+		if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
 			return;
 
 		delegated = this_cpu_ptr(&mptcp_delegated_actions);
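set_mask_bits(ptr, mask, bits) atomically clears mask, sets bits, and returns the previous word; with mask == 0 it degenerates to an atomic OR whose return value reveals whether MPTCP_DELEGATE_SCHEDULED was already set. Only the clear-to-set transition of that bit enqueues the subflow, so each subflow is queued at most once per round no matter how many actions pile up. A user-space approximation built on a compare-and-swap loop (C11, illustrative, not the kernel macro):

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1L << (n))
#define DELEGATE_SCHEDULED 0	/* illustrative copies of the kernel values */
#define DELEGATE_SEND      1

/* Rough equivalent of the kernel's set_mask_bits(): atomically replace
 * the bits in 'mask' with 'bits' and return the previous value. */
static long set_mask_bits_sketch(atomic_long *ptr, long mask, long bits)
{
	long old = atomic_load(ptr);

	while (!atomic_compare_exchange_weak(ptr, &old, (old & ~mask) | bits))
		;	/* a failed CAS refreshes 'old'; just retry */
	return old;
}

static atomic_long delegated_status;

int main(void)
{
	long set_bits = BIT(DELEGATE_SCHEDULED) | BIT(DELEGATE_SEND);
	long old = set_mask_bits_sketch(&delegated_status, 0, set_bits);

	if (!(old & BIT(DELEGATE_SCHEDULED)))
		puts("first scheduler: put the subflow on the per-CPU list");
	else
		puts("already queued: action recorded, no second enqueue");
	return 0;
}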
@@ -605,20 +608,6 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
 	return ret;
 }
 
-static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
-{
-	return !!READ_ONCE(subflow->delegated_status);
-}
-
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
-{
-	/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
-	 * touching the status bit
-	 */
-	smp_wmb();
-	clear_bit(action, &subflow->delegated_status);
-}
-
 int mptcp_is_enabled(const struct net *net);
 unsigned int mptcp_get_add_addr_timeout(const struct net *net);
 int mptcp_is_checksum_enabled(const struct net *net);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1956,9 +1956,15 @@ static void subflow_ulp_clone(const struct request_sock *req,
 static void tcp_release_cb_override(struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	long status;
 
-	if (mptcp_subflow_has_delegated_action(subflow))
-		mptcp_subflow_process_delegated(ssk);
+	/* process and clear all the pending actions, but leave the subflow into
+	 * the napi queue. To respect locking, only the same CPU that originated
+	 * the action can touch the list. mptcp_napi_poll will take care of it.
+	 */
+	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
+	if (status)
+		mptcp_subflow_process_delegated(ssk, status);
 
 	tcp_release_cb(ssk);
 }
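Here the mask is MPTCP_DELEGATE_ACTIONS_MASK, so the call claims all pending action bits in one atomic step while deliberately leaving MPTCP_DELEGATE_SCHEDULED set: the subflow stays on the per-CPU list, and only mptcp_napi_poll() on the CPU that queued it may dequeue it. A sketch of that claim, reusing the set_mask_bits_sketch() helper from the example above (C11, illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1L << (n))
#define DELEGATE_SCHEDULED    0	/* illustrative copies of the kernel values */
#define DELEGATE_SEND         1
#define DELEGATE_ACTIONS_MASK (~BIT(DELEGATE_SCHEDULED))

static long set_mask_bits_sketch(atomic_long *ptr, long mask, long bits)
{
	long old = atomic_load(ptr);

	while (!atomic_compare_exchange_weak(ptr, &old, (old & ~mask) | bits))
		;	/* retry until the swap wins */
	return old;
}

static atomic_long delegated_status;

int main(void)
{
	long status;

	/* Subflow queued with a send action pending. */
	atomic_store(&delegated_status, BIT(DELEGATE_SCHEDULED) | BIT(DELEGATE_SEND));

	/* Claim the action bits but keep SCHEDULED: the subflow stays on
	 * the per-CPU list for the owning CPU's napi poll to dequeue. */
	status = set_mask_bits_sketch(&delegated_status, DELEGATE_ACTIONS_MASK, 0);

	if (status & DELEGATE_ACTIONS_MASK)
		puts("process the claimed actions");
	printf("status after claim: %#lx (SCHEDULED kept)\n",
	       (unsigned long)atomic_load(&delegated_status));
	return 0;
}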