Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "One more set of fixes from the networking tree:

   - add missing input validation in nl80211_del_key(), preventing
     out-of-bounds access

   - last minute fix / improvement of a MRP netlink (uAPI) interface
     introduced in 5.9 (current) release

   - fix "unresolved symbol" build error under CONFIG_NET w/o
     CONFIG_INET due to missing tcp_timewait_sock and
     inet_timewait_sock BTF

   - fix 32 bit sub-register bounds tracking in the bpf verifier for
     OR case

   - tcp: fix receive window update in tcp_add_backlog()

   - openvswitch: handle DNAT tuple collision in conntrack-related
     code

   - r8169: wait for potential PHY reset to finish after applying a FW
     file, avoiding unexpected PHY behaviour and failures later on

   - mscc: fix tail dropping watermarks for Ocelot switches

   - avoid use-after-free in macsec code after a call to the GRO layer

   - avoid use-after-free in sctp error paths

   - add a device id for Cellient MPL200 WWAN card

   - rxrpc fixes:
      - fix the xdr encoding of the contents read from an rxrpc key
      - fix a BUG() for an unsupported encoding type
      - fix missing _bh lock annotations
      - fix acceptance handling for an incoming call where the
        incoming call is encrypted
      - the server token keyring isn't network namespaced - it belongs
        to the server, so there's no need; namespacing it means that
        request_key() fails to find it
      - fix a leak of the server keyring"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (21 commits)
  net: usb: qmi_wwan: add Cellient MPL200 card
  macsec: avoid use-after-free in macsec_handle_frame()
  r8169: consider that PHY reset may still be in progress after applying firmware
  openvswitch: handle DNAT tuple collision
  sctp: fix sctp_auth_init_hmacs() error path
  bridge: Netlink interface fix.
  net: wireless: nl80211: fix out-of-bounds access in nl80211_del_key()
  bpf: Fix scalar32_min_max_or bounds tracking
  tcp: fix receive window update in tcp_add_backlog()
  net: usb: rtl8150: set random MAC address when set_ethernet_addr() fails
  mptcp: more DATA FIN fixes
  net: mscc: ocelot: warn when encoding an out-of-bounds watermark value
  net: mscc: ocelot: divide watermark value by 60 when writing to SYS_ATOP
  net: qrtr: ns: Fix the incorrect usage of rcu_read_lock()
  rxrpc: Fix server keyring leak
  rxrpc: The server keyring isn't network-namespaced
  rxrpc: Fix accept on a connection that need securing
  rxrpc: Fix some missing _bh annotations on locking conn->state_lock
  rxrpc: Downgrade the BUG() for unsupported token type in rxrpc_read()
  rxrpc: Fix rxkad token xdr encoding
  ...
commit 6288c1d802
@@ -1171,6 +1171,8 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
  */
 static u16 vsc9959_wm_enc(u16 value)
 {
+        WARN_ON(value >= 16 * BIT(8));
+
         if (value >= BIT(8))
                 return BIT(8) | (value / 16);
@@ -911,6 +911,8 @@ static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
  */
 static u16 vsc9953_wm_enc(u16 value)
 {
+        WARN_ON(value >= 16 * BIT(9));
+
         if (value >= BIT(9))
                 return BIT(9) | (value / 16);
@@ -1253,7 +1253,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
         struct ocelot_port *ocelot_port = ocelot->ports[port];
         int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
         int pause_start, pause_stop;
-        int atop_wm;
+        int atop, atop_tot;

         if (port == ocelot->npi) {
                 maxlen += OCELOT_TAG_LEN;
@@ -1274,12 +1274,12 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
         ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
                             pause_stop);

-        /* Tail dropping watermark */
-        atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
-                  OCELOT_BUFFER_CELL_SZ;
-        ocelot_write_rix(ocelot, ocelot->ops->wm_enc(9 * maxlen),
-                         SYS_ATOP, port);
-        ocelot_write(ocelot, ocelot->ops->wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+        /* Tail dropping watermarks */
+        atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+                   OCELOT_BUFFER_CELL_SZ;
+        atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
+        ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
+        ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
 }
 EXPORT_SYMBOL(ocelot_port_set_maxlen);
@@ -745,6 +745,8 @@ static int ocelot_reset(struct ocelot *ocelot)
  */
 static u16 ocelot_wm_enc(u16 value)
 {
+        WARN_ON(value >= 16 * BIT(8));
+
         if (value >= BIT(8))
                 return BIT(8) | (value / 16);
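All three wm_enc() callbacks patched above use the same two-range encoding: values below BIT(8) (or BIT(9) on the VSC9953) are stored as-is, larger values set the high bit and store value/16 in coarser 16-unit granularity. The standalone C sketch below (not kernel code; the BIT macro and bound mirror the hunks) shows the arithmetic and why 16 * BIT(8) = 4096 is the first value that no longer fits, which is exactly what the added WARN_ON() catches.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint16_t wm_enc(uint16_t value)
{
        assert(value < 16 * BIT(8));    /* userspace mirror of the WARN_ON() */

        if (value >= BIT(8))
                return BIT(8) | (value / 16);   /* coarse range, 16-unit steps */
        return value;                           /* fine range, stored directly */
}

int main(void)
{
        printf("%u -> 0x%03x\n", 100u, (unsigned)wm_enc(100));   /* stored as-is */
        printf("%u -> 0x%03x\n", 3000u, (unsigned)wm_enc(3000)); /* bit 8 | 3000/16 */
        return 0;
}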
@@ -2058,11 +2058,18 @@ static void rtl_release_firmware(struct rtl8169_private *tp)

 void r8169_apply_firmware(struct rtl8169_private *tp)
 {
+        int val;
+
         /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
         if (tp->rtl_fw) {
                 rtl_fw_write_firmware(tp, tp->rtl_fw);
                 /* At least one firmware doesn't reset tp->ocp_base. */
                 tp->ocp_base = OCP_STD_PHY_BASE;
+
+                /* PHY soft reset may still be in progress */
+                phy_read_poll_timeout(tp->phydev, MII_BMCR, val,
+                                      !(val & BMCR_RESET),
+                                      50000, 600000, true);
         }
 }
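phy_read_poll_timeout() above re-reads MII_BMCR every 50 ms for up to 600 ms until the self-clearing BMCR_RESET bit drops. A rough userspace analogue of that poll-until-done-or-timeout idiom is sketched below; read_bmcr() is a hypothetical stand-in for the MDIO read, and the kernel helper handles the sleep/deadline bookkeeping for you.

#include <stdio.h>
#include <unistd.h>

#define BMCR_RESET 0x8000

static int read_bmcr(void)                /* hypothetical register read */
{
        static int calls;
        return ++calls < 3 ? BMCR_RESET : 0;    /* reset clears on 3rd read */
}

static int poll_reset_done(long sleep_us, long timeout_us)
{
        long waited = 0;

        for (;;) {
                if (!(read_bmcr() & BMCR_RESET))
                        return 0;               /* reset finished */
                if (waited >= timeout_us)
                        return -1;              /* -ETIMEDOUT in the kernel */
                usleep(sleep_us);
                waited += sleep_us;
        }
}

int main(void)
{
        printf("poll: %s\n", poll_reset_done(50000, 600000) ? "timeout" : "done");
        return 0;
}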
@@ -1077,6 +1077,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         struct macsec_rx_sa *rx_sa;
         struct macsec_rxh_data *rxd;
         struct macsec_dev *macsec;
+        unsigned int len;
         sci_t sci;
         u32 hdr_pn;
         bool cbit;
@@ -1232,9 +1233,10 @@ deliver:
         macsec_rxsc_put(rx_sc);

         skb_orphan(skb);
+        len = skb->len;
         ret = gro_cells_receive(&macsec->gro_cells, skb);
         if (ret == NET_RX_SUCCESS)
-                count_rx(dev, skb->len);
+                count_rx(dev, len);
         else
                 macsec->secy.netdev->stats.rx_dropped++;
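The macsec fix above is a pure ownership-transfer rule: once the skb is handed to gro_cells_receive(), the GRO layer may have already freed it, so any field still needed afterwards (here the length, for the stats update) must be copied out first. A minimal standalone illustration of the same pattern, with plain C stand-ins and no kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; };

static int deliver(struct pkt *p)        /* consumes and may free the packet */
{
        free(p);
        return 0;                        /* analogous to NET_RX_SUCCESS */
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));
        if (!p)
                return 1;
        p->len = 1500;

        unsigned int len = p->len;       /* snapshot before ownership moves */
        int ret = deliver(p);            /* p must not be touched after this */
        if (ret == 0)
                printf("counted %u bytes\n", len);   /* not p->len! */
        return 0;
}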
@@ -1375,6 +1375,7 @@ static const struct usb_device_id products[] = {
         {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},        /* Fibocom NL678 series */
         {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},        /* Foxconn T77W968 LTE */
         {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},        /* Foxconn T77W968 LTE with eSIM support*/
+        {QMI_FIXED_INTF(0x2692, 0x9025, 4)},        /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */

         /* 4. Gobi 1000 devices */
         {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},        /* Acer Gobi Modem Device */
@@ -274,12 +274,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
         return 1;
 }

-static inline void set_ethernet_addr(rtl8150_t * dev)
+static void set_ethernet_addr(rtl8150_t *dev)
 {
-        u8 node_id[6];
+        u8 node_id[ETH_ALEN];
+        int ret;

-        get_registers(dev, IDR, sizeof(node_id), node_id);
-        memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id));
+        ret = get_registers(dev, IDR, sizeof(node_id), node_id);
+
+        if (ret == sizeof(node_id)) {
+                ether_addr_copy(dev->netdev->dev_addr, node_id);
+        } else {
+                eth_hw_addr_random(dev->netdev);
+                netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
+                              dev->netdev->dev_addr);
+        }
 }

 static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
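The fallback path above relies on eth_hw_addr_random(), which boils down to six random bytes with the multicast bit cleared and the locally-administered bit set, so the made-up address can never collide with a vendor-assigned OUI. A userspace sketch of that generation (illustrative only, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

static void random_ether_addr(unsigned char addr[ETH_ALEN])
{
        for (int i = 0; i < ETH_ALEN; i++)
                addr[i] = rand() & 0xff;
        addr[0] &= 0xfe;        /* clear multicast bit */
        addr[0] |= 0x02;        /* set locally administered bit */
}

int main(void)
{
        unsigned char mac[ETH_ALEN];

        srand(time(NULL));
        random_ether_addr(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}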
@@ -51,11 +51,11 @@ enum rxrpc_cmsg_type {
         RXRPC_BUSY                = 6,        /* -r: server busy received [terminal] */
         RXRPC_LOCAL_ERROR        = 7,        /* -r: local error generated [terminal] */
         RXRPC_NEW_CALL                = 8,        /* -r: [Service] new incoming call notification */
-        RXRPC_ACCEPT                = 9,        /* s-: [Service] accept request */
         RXRPC_EXCLUSIVE_CALL        = 10,        /* s-: Call should be on exclusive connection */
         RXRPC_UPGRADE_SERVICE        = 11,        /* s-: Request service upgrade for client call */
         RXRPC_TX_LENGTH                = 12,        /* s-: Total length of Tx data */
         RXRPC_SET_CALL_TIMEOUT        = 13,        /* s-: Set one or more call timeouts */
+        RXRPC_CHARGE_ACCEPT        = 14,        /* s-: Charge the accept pool with a user call ID */
         RXRPC__SUPPORTED
 };
@@ -5667,8 +5667,8 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
         bool src_known = tnum_subreg_is_const(src_reg->var_off);
         bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
         struct tnum var32_off = tnum_subreg(dst_reg->var_off);
-        s32 smin_val = src_reg->smin_value;
-        u32 umin_val = src_reg->umin_value;
+        s32 smin_val = src_reg->s32_min_value;
+        u32 umin_val = src_reg->u32_min_value;

         /* Assuming scalar64_min_max_or will be called so it is safe
          * to skip updating register for known case.
@@ -5691,8 +5691,8 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
                 /* ORing two positives gives a positive, so safe to
                  * cast result into s64.
                  */
-                dst_reg->s32_min_value = dst_reg->umin_value;
-                dst_reg->s32_max_value = dst_reg->umax_value;
+                dst_reg->s32_min_value = dst_reg->u32_min_value;
+                dst_reg->s32_max_value = dst_reg->u32_max_value;
         }
 }
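The verifier bug fixed above was seeding 32-bit bounds from the 64-bit tracking fields. That is unsound because the low 32 bits of a bounded 64-bit value say nothing about the 64-bit bound: a register known to be at least 2^32 truncates to a subregister that can be 0. A standalone demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t umin = 0x100000000ULL;         /* 64-bit lower bound: 2^32 */
        uint32_t as_subreg = (uint32_t)umin;    /* what the low 32 bits hold */

        printf("umin_value  = 0x%llx\n", (unsigned long long)umin);
        printf("low 32 bits = 0x%x\n", as_subreg);      /* 0, not 2^32 */
        /* Using umin_value as the 32-bit floor would claim the subregister
         * is >= 4294967296, which a u32 cannot even represent -- hence the
         * separate u32_min_value/u32_max_value tracking in the fix above.
         */
        return 0;
}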
@@ -380,6 +380,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                           u32 filter_mask, const struct net_device *dev)
 {
         u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+        struct nlattr *af = NULL;
         struct net_bridge *br;
         struct ifinfomsg *hdr;
         struct nlmsghdr *nlh;
@@ -423,11 +424,18 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                 nla_nest_end(skb, nest);
         }

+        if (filter_mask & (RTEXT_FILTER_BRVLAN |
+                           RTEXT_FILTER_BRVLAN_COMPRESSED |
+                           RTEXT_FILTER_MRP)) {
+                af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
+                if (!af)
+                        goto nla_put_failure;
+        }
+
         /* Check if the VID information is requested */
         if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
             (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
                 struct net_bridge_vlan_group *vg;
-                struct nlattr *af;
                 int err;

                 /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
@@ -441,11 +449,6 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                         rcu_read_unlock();
                         goto done;
                 }
-                af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
-                if (!af) {
-                        rcu_read_unlock();
-                        goto nla_put_failure;
-                }
                 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
                         err = br_fill_ifvlaninfo_compressed(skb, vg);
                 else
@@ -456,32 +459,25 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                 rcu_read_unlock();
                 if (err)
                         goto nla_put_failure;
-
-                nla_nest_end(skb, af);
         }

         if (filter_mask & RTEXT_FILTER_MRP) {
-                struct nlattr *af;
                 int err;

                 if (!br_mrp_enabled(br) || port)
                         goto done;

-                af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
-                if (!af)
-                        goto nla_put_failure;
-
                 rcu_read_lock();
                 err = br_mrp_fill_info(skb, br);
                 rcu_read_unlock();

                 if (err)
                         goto nla_put_failure;
-
-                nla_nest_end(skb, af);
         }

 done:
+        if (af)
+                nla_nest_end(skb, af);
         nlmsg_end(skb, nlh);
         return 0;
@@ -9558,6 +9558,12 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {

 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
 {
+        /* BTF types for tcp_timewait_sock and inet_timewait_sock are not
+         * generated if CONFIG_INET=n. Trigger an explicit generation here.
+         */
+        BTF_TYPE_EMIT(struct inet_timewait_sock);
+        BTF_TYPE_EMIT(struct tcp_timewait_sock);
+
 #ifdef CONFIG_INET
         if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
                 return (unsigned long)sk;
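BTF_TYPE_EMIT() is a compile-time trick: casting a null pointer to "type *" and discarding the result generates no code, but it forces the compiler to consider the type referenced, so its debug/BTF information gets emitted even when nothing else in the build uses it. A sketch of the idiom with a local stand-in type (the kernel macro in <linux/btf.h> has this shape, to my understanding):

#include <stdio.h>

struct tcp_timewait_sock_like { int dummy; };   /* stand-in type */

#define TYPE_EMIT(type) ((void)(type *)0)

int main(void)
{
        TYPE_EMIT(struct tcp_timewait_sock_like); /* no code, type is referenced */
        puts("compiles to nothing, but the type reaches the debug info");
        return 0;
}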
@@ -1788,12 +1788,12 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)

         __skb_pull(skb, hdrlen);
         if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
-                thtail->window = th->window;
-
                 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

-                if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
+                if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
                         TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+                        thtail->window = th->window;
+                }

                 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
                  * thtail->fin, so that the fast path in tcp_rcv_established()
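The before()/after() helpers used in the hunk above compare 32-bit sequence numbers modulo 2^32 via a signed subtraction, so ordering stays correct across wraparound; the fix only takes the newer window when the coalesced segment's ACK is not older. Standalone version of the comparison:

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;      /* modular, wrap-safe compare */
}

int main(void)
{
        uint32_t old_ack = 0xfffffff0u;         /* just before wrap */
        uint32_t new_ack = 0x00000010u;         /* just after wrap */

        /* new_ack is "newer" even though it is numerically smaller */
        printf("before(new, old) = %d\n", before(new_ack, old_ack));   /* 0 */
        printf("before(old, new) = %d\n", before(old_ack, new_ack));   /* 1 */
        return 0;
}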
@@ -451,7 +451,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
                                  struct sk_buff *skb, struct mptcp_ext *ext)
 {
-        u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq);
+        /* The write_seq value has already been incremented, so the actual
+         * sequence number for the DATA_FIN is one less.
+         */
+        u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;

         if (!ext->use_map || !skb->len) {
                 /* RFC6824 requires a DSS mapping with specific values
@@ -460,10 +463,7 @@ static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
                 ext->data_fin = 1;
                 ext->use_map = 1;
                 ext->dsn64 = 1;
-                /* The write_seq value has already been incremented, so
-                 * the actual sequence number for the DATA_FIN is one less.
-                 */
-                ext->data_seq = data_fin_tx_seq - 1;
+                ext->data_seq = data_fin_tx_seq;
                 ext->subflow_seq = 0;
                 ext->data_len = 1;
         } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
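The off-by-one being fixed in this and the following subflow.c hunk is the same arithmetic on both sides: write_seq already points one past the DATA_FIN (it was incremented when the FIN was queued), so the DATA_FIN itself occupies write_seq - 1, and a mapping of data_len bytes starting at data_seq covers a DATA_FIN at data_seq + data_len - 1. A two-line standalone check:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t write_seq = 1001;                   /* after queuing the DATA_FIN */
        uint64_t data_fin_tx_seq = write_seq - 1;    /* the DATA_FIN's own seq: 1000 */

        uint64_t data_seq = 991, data_len = 10;      /* mapping covers 991..1000 */
        printf("fin seq %" PRIu64 ", mapping covers fin: %d\n",
               data_fin_tx_seq,
               data_seq + data_len - 1 == data_fin_tx_seq);    /* prints 1 */
        return 0;
}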
@@ -749,7 +749,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                         return MAPPING_DATA_FIN;
                 }
         } else {
-                u64 data_fin_seq = mpext->data_seq + data_len;
+                u64 data_fin_seq = mpext->data_seq + data_len - 1;

                 /* If mpext->data_seq is a 32-bit value, data_fin_seq
                  * must also be limited to 32 bits.
@@ -905,15 +905,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
         }
         err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);

-        if (err == NF_ACCEPT &&
-            ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
-                if (maniptype == NF_NAT_MANIP_SRC)
-                        maniptype = NF_NAT_MANIP_DST;
-                else
-                        maniptype = NF_NAT_MANIP_SRC;
-
-                err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
-                                         maniptype);
+        if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+                if (ct->status & IPS_SRC_NAT) {
+                        if (maniptype == NF_NAT_MANIP_SRC)
+                                maniptype = NF_NAT_MANIP_DST;
+                        else
+                                maniptype = NF_NAT_MANIP_SRC;
+
+                        err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+                                                 maniptype);
+                } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+                        err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
+                                                 NF_NAT_MANIP_SRC);
+                }
         }

         /* Mark NAT done if successful and update the flow key. */
@@ -193,7 +193,7 @@ static int announce_servers(struct sockaddr_qrtr *sq)
         struct qrtr_server *srv;
         struct qrtr_node *node;
         void __rcu **slot;
-        int ret = 0;
+        int ret;

         node = node_get(qrtr_ns.local_node);
         if (!node)
@@ -203,18 +203,27 @@ static int announce_servers(struct sockaddr_qrtr *sq)
         /* Announce the list of servers registered in this node */
         radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
                 srv = radix_tree_deref_slot(slot);
+                if (!srv)
+                        continue;
+                if (radix_tree_deref_retry(srv)) {
+                        slot = radix_tree_iter_retry(&iter);
+                        continue;
+                }
+                slot = radix_tree_iter_resume(slot, &iter);
+                rcu_read_unlock();

                 ret = service_announce_new(sq, srv);
                 if (ret < 0) {
                         pr_err("failed to announce new service\n");
-                        goto err_out;
+                        return ret;
                 }
+
+                rcu_read_lock();
         }

-err_out:
         rcu_read_unlock();

-        return ret;
+        return 0;
 }

 static struct qrtr_server *server_add(unsigned int service,
@@ -339,7 +348,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
         struct qrtr_node *node;
         void __rcu **slot;
         struct kvec iv;
-        int ret = 0;
+        int ret;

         iv.iov_base = &pkt;
         iv.iov_len = sizeof(pkt);
@@ -352,7 +361,16 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
         /* Advertise removal of this client to all servers of remote node */
         radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
                 srv = radix_tree_deref_slot(slot);
+                if (!srv)
+                        continue;
+                if (radix_tree_deref_retry(srv)) {
+                        slot = radix_tree_iter_retry(&iter);
+                        continue;
+                }
+                slot = radix_tree_iter_resume(slot, &iter);
+                rcu_read_unlock();
                 server_del(node, srv->port);
+                rcu_read_lock();
         }
         rcu_read_unlock();

@@ -368,6 +386,14 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
         rcu_read_lock();
         radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
                 srv = radix_tree_deref_slot(slot);
+                if (!srv)
+                        continue;
+                if (radix_tree_deref_retry(srv)) {
+                        slot = radix_tree_iter_retry(&iter);
+                        continue;
+                }
+                slot = radix_tree_iter_resume(slot, &iter);
+                rcu_read_unlock();

                 sq.sq_family = AF_QIPCRTR;
                 sq.sq_node = srv->node;
@@ -379,14 +405,14 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
                 ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
                 if (ret < 0) {
                         pr_err("failed to send bye cmd\n");
-                        goto err_out;
+                        return ret;
                 }
+                rcu_read_lock();
         }

-err_out:
         rcu_read_unlock();

-        return ret;
+        return 0;
 }

 static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
@@ -404,7 +430,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
         struct list_head *li;
         void __rcu **slot;
         struct kvec iv;
-        int ret = 0;
+        int ret;

         iv.iov_base = &pkt;
         iv.iov_len = sizeof(pkt);
@@ -447,6 +473,14 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
         rcu_read_lock();
         radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
                 srv = radix_tree_deref_slot(slot);
+                if (!srv)
+                        continue;
+                if (radix_tree_deref_retry(srv)) {
+                        slot = radix_tree_iter_retry(&iter);
+                        continue;
+                }
+                slot = radix_tree_iter_resume(slot, &iter);
+                rcu_read_unlock();

                 sq.sq_family = AF_QIPCRTR;
                 sq.sq_node = srv->node;
@@ -458,14 +492,14 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
                 ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
                 if (ret < 0) {
                         pr_err("failed to send del client cmd\n");
-                        goto err_out;
+                        return ret;
                 }
+                rcu_read_lock();
         }

-err_out:
         rcu_read_unlock();

-        return ret;
+        return 0;
 }

 static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
@@ -571,16 +605,34 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
         rcu_read_lock();
         radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) {
                 node = radix_tree_deref_slot(node_slot);
+                if (!node)
+                        continue;
+                if (radix_tree_deref_retry(node)) {
+                        node_slot = radix_tree_iter_retry(&node_iter);
+                        continue;
+                }
+                node_slot = radix_tree_iter_resume(node_slot, &node_iter);

                 radix_tree_for_each_slot(srv_slot, &node->servers,
                                          &srv_iter, 0) {
                         struct qrtr_server *srv;

                         srv = radix_tree_deref_slot(srv_slot);
+                        if (!srv)
+                                continue;
+                        if (radix_tree_deref_retry(srv)) {
+                                srv_slot = radix_tree_iter_retry(&srv_iter);
+                                continue;
+                        }

                         if (!server_match(srv, &filter))
                                 continue;

+                        srv_slot = radix_tree_iter_resume(srv_slot, &srv_iter);
+
+                        rcu_read_unlock();
                         lookup_notify(from, srv, true);
+                        rcu_read_lock();
                 }
         }
         rcu_read_unlock();
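The qrtr changes above all follow one pattern: code that may block (sendmsg, notifications) must not run inside an RCU read-side section, so the loop pins its position with radix_tree_iter_resume(), drops the read lock, does the blocking work, then retakes the lock and resumes. A loose userspace analogue of that drop-and-resume shape, with a mutex and an array index standing in for the RCU lock and the radix-tree iterator (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int services[] = { 11, 22, 33 };
static const int nr_services = 3;

static void blocking_notify(int svc)    /* may sleep -- no lock held */
{
        printf("notify %d\n", svc);
}

int main(void)
{
        pthread_mutex_lock(&lock);
        for (int i = 0; i < nr_services; i++) {
                int svc = services[i];          /* snapshot while protected */

                pthread_mutex_unlock(&lock);    /* drop before blocking work */
                blocking_notify(svc);
                pthread_mutex_lock(&lock);      /* retake, resume at i + 1 */
        }
        pthread_mutex_unlock(&lock);
        return 0;
}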
@@ -518,7 +518,6 @@ enum rxrpc_call_state {
         RXRPC_CALL_CLIENT_RECV_REPLY,        /* - client receiving reply phase */
         RXRPC_CALL_SERVER_PREALLOC,        /* - service preallocation */
         RXRPC_CALL_SERVER_SECURING,        /* - server securing request connection */
-        RXRPC_CALL_SERVER_ACCEPTING,        /* - server accepting request */
         RXRPC_CALL_SERVER_RECV_REQUEST,        /* - server receiving request */
         RXRPC_CALL_SERVER_ACK_REQUEST,        /* - server pending ACK of request */
         RXRPC_CALL_SERVER_SEND_REPLY,        /* - server sending reply */
@@ -714,8 +713,8 @@ struct rxrpc_ack_summary {
 enum rxrpc_command {
         RXRPC_CMD_SEND_DATA,                /* send data message */
         RXRPC_CMD_SEND_ABORT,                /* request abort generation */
-        RXRPC_CMD_ACCEPT,                /* [server] accept incoming call */
         RXRPC_CMD_REJECT_BUSY,                /* [server] reject a call as busy */
+        RXRPC_CMD_CHARGE_ACCEPT,        /* [server] charge accept preallocation */
 };

 struct rxrpc_call_params {
@@ -755,9 +754,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
                                            struct rxrpc_sock *,
                                            struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
-                                     rxrpc_notify_rx_t);
-int rxrpc_reject_call(struct rxrpc_sock *);
+int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);

 /*
  * call_event.c
@@ -39,8 +39,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                       unsigned int debug_id)
 {
         const void *here = __builtin_return_address(0);
-        struct rxrpc_call *call;
+        struct rxrpc_call *call, *xcall;
         struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+        struct rb_node *parent, **pp;
         int max, tmp;
         unsigned int size = RXRPC_BACKLOG_MAX;
         unsigned int head, tail, call_head, call_tail;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
         }

         /* Now it gets complicated, because calls get registered with the
-         * socket here, particularly if a user ID is preassigned by the user.
+         * socket here, with a user ID preassigned by the user.
          */
         call = rxrpc_alloc_call(rx, gfp, debug_id);
         if (!call)
@@ -107,34 +108,33 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
               here, (const void *)user_call_ID);

         write_lock(&rx->call_lock);

+        /* Check the user ID isn't already in use */
+        pp = &rx->calls.rb_node;
+        parent = NULL;
+        while (*pp) {
+                parent = *pp;
+                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+                if (user_call_ID < xcall->user_call_ID)
+                        pp = &(*pp)->rb_left;
+                else if (user_call_ID > xcall->user_call_ID)
+                        pp = &(*pp)->rb_right;
+                else
+                        goto id_in_use;
+        }
+
+        call->user_call_ID = user_call_ID;
+        call->notify_rx = notify_rx;
         if (user_attach_call) {
-                struct rxrpc_call *xcall;
-                struct rb_node *parent, **pp;
-
-                /* Check the user ID isn't already in use */
-                pp = &rx->calls.rb_node;
-                parent = NULL;
-                while (*pp) {
-                        parent = *pp;
-                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-                        if (user_call_ID < xcall->user_call_ID)
-                                pp = &(*pp)->rb_left;
-                        else if (user_call_ID > xcall->user_call_ID)
-                                pp = &(*pp)->rb_right;
-                        else
-                                goto id_in_use;
-                }
-
-                call->user_call_ID = user_call_ID;
-                call->notify_rx = notify_rx;
                 rxrpc_get_call(call, rxrpc_call_got_kernel);
                 user_attach_call(call, user_call_ID);
-                rxrpc_get_call(call, rxrpc_call_got_userid);
-                rb_link_node(&call->sock_node, parent, pp);
-                rb_insert_color(&call->sock_node, &rx->calls);
-                set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
         }

+        rxrpc_get_call(call, rxrpc_call_got_userid);
+        rb_link_node(&call->sock_node, parent, pp);
+        rb_insert_color(&call->sock_node, &rx->calls);
+        set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+
         list_add(&call->sock_link, &rx->sock_calls);

         write_unlock(&rx->call_lock);
@@ -157,11 +157,8 @@ id_in_use:
 }

 /*
- * Preallocate sufficient service connections, calls and peers to cover the
- * entire backlog of a socket.  When a new call comes in, if we don't have
- * sufficient of each available, the call gets rejected as busy or ignored.
- *
- * The backlog is replenished when a connection is accepted or rejected.
+ * Allocate the preallocation buffers for incoming service calls.  These must
+ * be charged manually.
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
 {
@@ -174,13 +171,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
                 rx->backlog = b;
         }

-        if (rx->discard_new_call)
-                return 0;
-
-        while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
-                                          atomic_inc_return(&rxrpc_debug_id)) == 0)
-                ;
-
         return 0;
 }

@@ -333,6 +323,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
         rxrpc_see_call(call);
         call->conn = conn;
         call->security = conn->security;
+        call->security_ix = conn->security_ix;
         call->peer = rxrpc_get_peer(conn->params.peer);
         call->cong_cwnd = call->peer->cong_cwnd;
         return call;
@@ -402,8 +393,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,

         if (rx->notify_new_call)
                 rx->notify_new_call(&rx->sk, call, call->user_call_ID);
-        else
-                sk_acceptq_added(&rx->sk);

         spin_lock(&conn->state_lock);
         switch (conn->state) {
@@ -415,12 +404,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,

         case RXRPC_CONN_SERVICE:
                 write_lock(&call->state_lock);
-                if (call->state < RXRPC_CALL_COMPLETE) {
-                        if (rx->discard_new_call)
-                                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                        else
-                                call->state = RXRPC_CALL_SERVER_ACCEPTING;
-                }
+                if (call->state < RXRPC_CALL_COMPLETE)
+                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                 write_unlock(&call->state_lock);
                 break;

@@ -440,9 +425,6 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,

         rxrpc_send_ping(call, skb);

-        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
-                rxrpc_notify_socket(call);
-
         /* We have to discard the prealloc queue's ref here and rely on a
          * combination of the RCU read lock and refs held either by the socket
          * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
@@ -460,187 +442,18 @@ no_call:
 }

 /*
- * handle acceptance of a call by userspace
- * - assign the user call ID to the call at the front of the queue
- * - called with the socket locked.
+ * Charge up socket with preallocated calls, attaching user call IDs.
  */
-struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
-                                     unsigned long user_call_ID,
-                                     rxrpc_notify_rx_t notify_rx)
-        __releases(&rx->sk.sk_lock.slock)
-        __acquires(call->user_mutex)
+int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
 {
-        struct rxrpc_call *call;
-        struct rb_node *parent, **pp;
-        int ret;
-
-        _enter(",%lx", user_call_ID);
-
-        ASSERT(!irqs_disabled());
-
-        write_lock(&rx->call_lock);
-
-        if (list_empty(&rx->to_be_accepted)) {
-                write_unlock(&rx->call_lock);
-                release_sock(&rx->sk);
-                kleave(" = -ENODATA [empty]");
-                return ERR_PTR(-ENODATA);
-        }
-
-        /* check the user ID isn't already in use */
-        pp = &rx->calls.rb_node;
-        parent = NULL;
-        while (*pp) {
-                parent = *pp;
-                call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-                if (user_call_ID < call->user_call_ID)
-                        pp = &(*pp)->rb_left;
-                else if (user_call_ID > call->user_call_ID)
-                        pp = &(*pp)->rb_right;
-                else
-                        goto id_in_use;
-        }
-
-        /* Dequeue the first call and check it's still valid.  We gain
-         * responsibility for the queue's reference.
-         */
-        call = list_entry(rx->to_be_accepted.next,
-                          struct rxrpc_call, accept_link);
-        write_unlock(&rx->call_lock);
-
-        /* We need to gain the mutex from the interrupt handler without
-         * upsetting lockdep, so we have to release it there and take it here.
-         * We are, however, still holding the socket lock, so other accepts
-         * must wait for us and no one can add the user ID behind our backs.
-         */
-        if (mutex_lock_interruptible(&call->user_mutex) < 0) {
-                release_sock(&rx->sk);
-                kleave(" = -ERESTARTSYS");
-                return ERR_PTR(-ERESTARTSYS);
-        }
-
-        write_lock(&rx->call_lock);
-        list_del_init(&call->accept_link);
-        sk_acceptq_removed(&rx->sk);
-        rxrpc_see_call(call);
-
-        /* Find the user ID insertion point. */
-        pp = &rx->calls.rb_node;
-        parent = NULL;
-        while (*pp) {
-                parent = *pp;
-                call = rb_entry(parent, struct rxrpc_call, sock_node);
-
-                if (user_call_ID < call->user_call_ID)
-                        pp = &(*pp)->rb_left;
-                else if (user_call_ID > call->user_call_ID)
-                        pp = &(*pp)->rb_right;
-                else
-                        BUG();
-        }
-
-        write_lock_bh(&call->state_lock);
-        switch (call->state) {
-        case RXRPC_CALL_SERVER_ACCEPTING:
-                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                break;
-        case RXRPC_CALL_COMPLETE:
-                ret = call->error;
-                goto out_release;
-        default:
-                BUG();
-        }
-
-        /* formalise the acceptance */
-        call->notify_rx = notify_rx;
-        call->user_call_ID = user_call_ID;
-        rxrpc_get_call(call, rxrpc_call_got_userid);
-        rb_link_node(&call->sock_node, parent, pp);
-        rb_insert_color(&call->sock_node, &rx->calls);
-        if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
-                BUG();
-
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        rxrpc_notify_socket(call);
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        release_sock(&rx->sk);
-        _leave(" = %p{%d}", call, call->debug_id);
-        return call;
-
-out_release:
-        _debug("release %p", call);
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        rxrpc_release_call(rx, call);
-        rxrpc_put_call(call, rxrpc_call_put);
-        goto out;
-
-id_in_use:
-        ret = -EBADSLT;
-        write_unlock(&rx->call_lock);
-out:
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        release_sock(&rx->sk);
-        _leave(" = %d", ret);
-        return ERR_PTR(ret);
-}
-
-/*
- * Handle rejection of a call by userspace
- * - reject the call at the front of the queue
- */
-int rxrpc_reject_call(struct rxrpc_sock *rx)
-{
-        struct rxrpc_call *call;
-        bool abort = false;
-        int ret;
-
-        _enter("");
-
-        ASSERT(!irqs_disabled());
-
-        write_lock(&rx->call_lock);
-
-        if (list_empty(&rx->to_be_accepted)) {
-                write_unlock(&rx->call_lock);
-                return -ENODATA;
-        }
-
-        /* Dequeue the first call and check it's still valid.  We gain
-         * responsibility for the queue's reference.
-         */
-        call = list_entry(rx->to_be_accepted.next,
-                          struct rxrpc_call, accept_link);
-        list_del_init(&call->accept_link);
-        sk_acceptq_removed(&rx->sk);
-        rxrpc_see_call(call);
-
-        write_lock_bh(&call->state_lock);
-        switch (call->state) {
-        case RXRPC_CALL_SERVER_ACCEPTING:
-                __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
-                abort = true;
-                fallthrough;
-        case RXRPC_CALL_COMPLETE:
-                ret = call->error;
-                goto out_discard;
-        default:
-                BUG();
-        }
-
-out_discard:
-        write_unlock_bh(&call->state_lock);
-        write_unlock(&rx->call_lock);
-        if (abort) {
-                rxrpc_send_abort_packet(call);
-                rxrpc_release_call(rx, call);
-                rxrpc_put_call(call, rxrpc_call_put);
-        }
-        rxrpc_service_prealloc(rx, GFP_KERNEL);
-        _leave(" = %d", ret);
-        return ret;
+        struct rxrpc_backlog *b = rx->backlog;
+
+        if (rx->sk.sk_state == RXRPC_CLOSE)
+                return -ESHUTDOWN;
+
+        return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
+                                          GFP_KERNEL,
+                                          atomic_inc_return(&rxrpc_debug_id));
 }

 /*
@@ -23,7 +23,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
         [RXRPC_CALL_CLIENT_RECV_REPLY]                = "ClRcvRpl",
         [RXRPC_CALL_SERVER_PREALLOC]                = "SvPrealc",
         [RXRPC_CALL_SERVER_SECURING]                = "SvSecure",
-        [RXRPC_CALL_SERVER_ACCEPTING]                = "SvAccept",
         [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
         [RXRPC_CALL_SERVER_ACK_REQUEST]                = "SvAckReq",
         [RXRPC_CALL_SERVER_SEND_REPLY]                = "SvSndRpl",
@@ -352,9 +351,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
         call->call_id                = sp->hdr.callNumber;
         call->service_id        = sp->hdr.serviceId;
         call->cid                = sp->hdr.cid;
-        call->state                = RXRPC_CALL_SERVER_ACCEPTING;
-        if (sp->hdr.securityIndex > 0)
-                call->state        = RXRPC_CALL_SERVER_SECURING;
+        call->state                = RXRPC_CALL_SERVER_SECURING;
         call->cong_tstamp        = skb->tstamp;

         /* Set the channel for this call.  We don't get channel_lock as we're
@@ -269,7 +269,7 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
         if (call) {
                 write_lock_bh(&call->state_lock);
                 if (call->state == RXRPC_CALL_SERVER_SECURING) {
-                        call->state = RXRPC_CALL_SERVER_ACCEPTING;
+                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                         rxrpc_notify_socket(call);
                 }
                 write_unlock_bh(&call->state_lock);
@@ -340,18 +340,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                         return ret;

                 spin_lock(&conn->channel_lock);
-                spin_lock(&conn->state_lock);
+                spin_lock_bh(&conn->state_lock);

                 if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
                         conn->state = RXRPC_CONN_SERVICE;
-                        spin_unlock(&conn->state_lock);
+                        spin_unlock_bh(&conn->state_lock);
                         for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
                                 rxrpc_call_is_secure(
                                         rcu_dereference_protected(
                                                 conn->channels[loop].call,
                                                 lockdep_is_held(&conn->channel_lock)));
                 } else {
-                        spin_unlock(&conn->state_lock);
+                        spin_unlock_bh(&conn->state_lock);
                 }

                 spin_unlock(&conn->channel_lock);
@@ -903,7 +903,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)

         _enter("");

-        if (optlen <= 0 || optlen > PAGE_SIZE - 1)
+        if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities)
                 return -EINVAL;

         description = memdup_sockptr_nul(optval, optlen);
@@ -940,7 +940,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
         if (IS_ERR(description))
                 return PTR_ERR(description);

-        key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL);
+        key = request_key(&key_type_keyring, description, NULL);
         if (IS_ERR(key)) {
                 kfree(description);
                 _leave(" = %ld", PTR_ERR(key));
@@ -1072,7 +1072,7 @@ static long rxrpc_read(const struct key *key,

                 switch (token->security_index) {
                 case RXRPC_SECURITY_RXKAD:
-                        toksize += 9 * 4;        /* viceid, kvno, key*2 + len, begin,
+                        toksize += 8 * 4;        /* viceid, kvno, key*2, begin,
                                                  * end, primary, tktlen */
                         toksize += RND(token->kad->ticket_len);
                         break;
@@ -1107,7 +1107,8 @@ static long rxrpc_read(const struct key *key,
                         break;

                 default: /* we have a ticket we can't encode */
-                        BUG();
+                        pr_err("Unsupported key token type (%u)\n",
+                               token->security_index);
                         continue;
                 }

@@ -1138,6 +1139,14 @@ static long rxrpc_read(const struct key *key,
                 memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3));        \
         xdr += (_l + 3) >> 2;                                        \
 } while(0)
+#define ENCODE_BYTES(l, s)                                        \
+do {                                                                \
+        u32 _l = (l);                                                \
+        memcpy(xdr, (s), _l);                                        \
+        if (_l & 3)                                                \
+                memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3));        \
+        xdr += (_l + 3) >> 2;                                        \
+} while(0)
 #define ENCODE64(x)                                                \
         do {                                                        \
                 __be64 y = cpu_to_be64(x);                        \
@@ -1165,7 +1174,7 @@ static long rxrpc_read(const struct key *key,
                 case RXRPC_SECURITY_RXKAD:
                         ENCODE(token->kad->vice_id);
                         ENCODE(token->kad->kvno);
-                        ENCODE_DATA(8, token->kad->session_key);
+                        ENCODE_BYTES(8, token->kad->session_key);
                         ENCODE(token->kad->start);
                         ENCODE(token->kad->expiry);
                         ENCODE(token->kad->primary_flag);
@@ -1215,7 +1224,6 @@ static long rxrpc_read(const struct key *key,
                         break;

                 default:
-                        BUG();
                         break;
                 }
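The new ENCODE_BYTES() macro above writes a raw byte string padded to a 32-bit boundary, whereas ENCODE_DATA() also emits a leading length word; the session key's length is fixed by the rxkad format, so emitting the extra word corrupted the XDR stream (hence toksize dropping from 9*4 to 8*4). A userspace sketch of the padded copy, assuming nothing beyond the macro body shown in the hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t *encode_bytes(uint32_t *xdr, const void *s, uint32_t len)
{
        uint32_t zero = 0;

        memcpy(xdr, s, len);
        if (len & 3)                            /* pad to a 4-byte multiple */
                memcpy((uint8_t *)xdr + len, &zero, 4 - (len & 3));
        return xdr + ((len + 3) >> 2);          /* advance by whole words */
}

int main(void)
{
        uint32_t buf[4] = { 0 };
        uint32_t *end = encode_bytes(buf, "abcdef", 6); /* 6 bytes -> 8 on wire */

        printf("consumed %zu words\n", (size_t)(end - buf));    /* 2 */
        return 0;
}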
@@ -178,37 +178,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
         return ret;
 }

-/*
- * Pass back notification of a new call.  The call is added to the
- * to-be-accepted list.  This means that the next call to be accepted might not
- * be the last call seen awaiting acceptance, but unless we leave this on the
- * front of the queue and block all other messages until someone gives us a
- * user_ID for it, there's not a lot we can do.
- */
-static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
-                                  struct rxrpc_call *call,
-                                  struct msghdr *msg, int flags)
-{
-        int tmp = 0, ret;
-
-        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
-
-        if (ret == 0 && !(flags & MSG_PEEK)) {
-                _debug("to be accepted");
-                write_lock_bh(&rx->recvmsg_lock);
-                list_del_init(&call->recvmsg_link);
-                write_unlock_bh(&rx->recvmsg_lock);
-
-                rxrpc_get_call(call, rxrpc_call_got);
-                write_lock(&rx->call_lock);
-                list_add_tail(&call->accept_link, &rx->to_be_accepted);
-                write_unlock(&rx->call_lock);
-        }
-
-        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
-        return ret;
-}
-
 /*
  * End the packet reception phase.
  */
@@ -630,9 +599,6 @@ try_again:
         }

         switch (READ_ONCE(call->state)) {
-        case RXRPC_CALL_SERVER_ACCEPTING:
-                ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
-                break;
         case RXRPC_CALL_CLIENT_RECV_REPLY:
         case RXRPC_CALL_SERVER_RECV_REQUEST:
         case RXRPC_CALL_SERVER_ACK_REQUEST:
@@ -728,7 +694,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
                call->debug_id, rxrpc_call_states[call->state],
                iov_iter_count(iter), want_more);

-        ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
+        ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);

         mutex_lock(&call->user_mutex);
@@ -530,10 +530,10 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
                                 return -EINVAL;
                         break;

-                case RXRPC_ACCEPT:
+                case RXRPC_CHARGE_ACCEPT:
                         if (p->command != RXRPC_CMD_SEND_DATA)
                                 return -EINVAL;
-                        p->command = RXRPC_CMD_ACCEPT;
+                        p->command = RXRPC_CMD_CHARGE_ACCEPT;
                         if (len != 0)
                                 return -EINVAL;
                         break;
@@ -659,16 +659,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
         if (ret < 0)
                 goto error_release_sock;

-        if (p.command == RXRPC_CMD_ACCEPT) {
+        if (p.command == RXRPC_CMD_CHARGE_ACCEPT) {
                 ret = -EINVAL;
                 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
                         goto error_release_sock;
-                call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
-                /* The socket is now unlocked. */
-                if (IS_ERR(call))
-                        return PTR_ERR(call);
-                ret = 0;
-                goto out_put_unlock;
+                ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
+                goto error_release_sock;
         }

         call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
@@ -690,7 +686,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
         case RXRPC_CALL_CLIENT_AWAIT_CONN:
         case RXRPC_CALL_SERVER_PREALLOC:
         case RXRPC_CALL_SERVER_SECURING:
-        case RXRPC_CALL_SERVER_ACCEPTING:
                 rxrpc_put_call(call, rxrpc_call_put);
                 ret = -EBUSY;
                 goto error_release_sock;
@@ -494,6 +494,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 out_err:
         /* Clean up any successful allocations */
         sctp_auth_destroy_hmacs(ep->auth_hmacs);
+        ep->auth_hmacs = NULL;
         return -ENOMEM;
 }
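The sctp fix above is the classic free-and-NULL idiom: the endpoint is torn down later by a path that destroys ep->auth_hmacs again, so the error path must leave the pointer NULL to make that second cleanup a no-op rather than a double free. Minimal standalone illustration:

#include <stdio.h>
#include <stdlib.h>

struct endpoint { int *auth_hmacs; };

static void destroy_hmacs(int *hmacs)
{
        free(hmacs);            /* free(NULL) is a legal no-op */
}

int main(void)
{
        struct endpoint ep = { .auth_hmacs = malloc(sizeof(int)) };

        /* error path */
        destroy_hmacs(ep.auth_hmacs);
        ep.auth_hmacs = NULL;   /* the one-line fix */

        /* later teardown path runs unconditionally */
        destroy_hmacs(ep.auth_hmacs);   /* safe: pointer is NULL */
        return 0;
}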
@@ -4172,6 +4172,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
         if (err)
                 return err;

+        if (key.idx < 0)
+                return -EINVAL;
+
         if (info->attrs[NL80211_ATTR_MAC])
                 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);