mptcp: allow the in kernel PM to set MPC subflow priority

Any local endpoints configured on the address matching the
MPC subflow are currently ignored.

Specifically, setting the backup flag on them has no effect
on the first subflow, as the MPC handshake can't carry such
info.

This change refactors the MPC endpoint id accounting to
additionally fetch the priority info from the relevant endpoint
and trigger the MP_PRIO handshake when needed.

As a result, the MPC subflow now switches to backup priority
after the MPTCP socket is fully established, according to the
local endpoint configuration.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Paolo Abeni 2022-07-11 12:16:31 -07:00, committed by Jakub Kicinski
Parent bedee0b561
Commit c157bbe776
1 changed file: 15 additions and 22 deletions


net/mptcp/pm_netlink.c

@@ -514,30 +514,14 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
 	struct mptcp_pm_addr_entry *entry;
 
 	list_for_each_entry(entry, &pernet->local_addr_list, list) {
-		if ((!lookup_by_id && mptcp_addresses_equal(&entry->addr, info, true)) ||
+		if ((!lookup_by_id &&
+		     mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
 		    (lookup_by_id && entry->addr.id == info->id))
 			return entry;
 	}
 	return NULL;
 }
 
-static int
-lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
-{
-	const struct mptcp_pm_addr_entry *entry;
-	int ret = -1;
-
-	rcu_read_lock();
-	list_for_each_entry(entry, &pernet->local_addr_list, list) {
-		if (mptcp_addresses_equal(&entry->addr, addr, entry->addr.port)) {
-			ret = entry->addr.id;
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 {
 	struct sock *sk = (struct sock *)msk;
@@ -555,13 +539,22 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 
 	/* do lazy endpoint usage accounting for the MPC subflows */
 	if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
+		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
+		struct mptcp_pm_addr_entry *entry;
 		struct mptcp_addr_info mpc_addr;
-		int mpc_id;
+		bool backup = false;
 
 		local_address((struct sock_common *)msk->first, &mpc_addr);
-		mpc_id = lookup_id_by_addr(pernet, &mpc_addr);
-		if (mpc_id >= 0)
-			__clear_bit(mpc_id, msk->pm.id_avail_bitmap);
+		rcu_read_lock();
+		entry = __lookup_addr(pernet, &mpc_addr, false);
+		if (entry) {
+			__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
+			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+		}
+		rcu_read_unlock();
+
+		if (backup)
+			mptcp_pm_send_ack(msk, subflow, true, backup);
 
 		msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
 	}
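
For readability, the new accounting branch of mptcp_pm_create_subflow_or_signal_addr() is reproduced below with the diff markers stripped and explanatory comments added. This is a simplified excerpt of the patched code, not a standalone build unit; note that __lookup_addr() now also compares ports (passing entry->addr.port to mptcp_addresses_equal()), matching the semantics of the removed lookup_id_by_addr() helper it replaces.

	/* do lazy endpoint usage accounting for the MPC subflows */
	if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
		struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
		struct mptcp_pm_addr_entry *entry;
		struct mptcp_addr_info mpc_addr;
		bool backup = false;

		/* fetch the local address used by the MPC (first) subflow */
		local_address((struct sock_common *)msk->first, &mpc_addr);

		/* look up the endpoint matching that address: mark its id as
		 * in use and remember whether it carries the backup flag
		 */
		rcu_read_lock();
		entry = __lookup_addr(pernet, &mpc_addr, false);
		if (entry) {
			__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
		}
		rcu_read_unlock();

		/* the MPC handshake could not carry the backup flag; emit an
		 * MP_PRIO now that the connection is established
		 */
		if (backup)
			mptcp_pm_send_ack(msk, subflow, true, backup);

		msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
	}

In practice the backup flag comes from the in-kernel PM endpoint configuration (for example, an endpoint created with the backup flag via ip mptcp endpoint add); with this change that flag now also affects the initial (MPC) subflow once the connection is fully established.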