// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp diag support.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* define some functions to make asoc/ep fill look clean */
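/* Fill the common inet_diag_msg header from an asoc: family, ports,
 * ifindex, socket cookie and the primary-path addresses, plus the asoc
 * state and, while the T3-RTX timer is pending, its expiry and the
 * number of retransmitted DATA chunks.
 */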
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;
	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	if (timer_pending(t3_rtx)) {
		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
		r->idiag_retrans = asoc->rtx_data_chunks;
		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
	}
}
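
/* Put every local address bound to the ep/asoc into a single
 * INET_DIAG_LOCALS attribute, each entry zero-padded to
 * sockaddr_storage size.
 */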
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}
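
/* Put the address of every peer transport into a single
 * INET_DIAG_PEERS attribute, each entry zero-padded to
 * sockaddr_storage size.
 */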
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill */
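/* Build one complete diag record: the inet_diag_msg header for either
 * an asoc or (asoc == NULL) a listening ep, followed by the requested
 * attributes: skmeminfo, sctp_info, the local and peer addresses and
 * the congestion algorithm name (always "reno" for SCTP).
 */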
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh,
			       bool net_admin)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->idiag_expires = 0;
	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
	bool net_admin;
};
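
/* Upper-bound estimate of the skb space needed to dump one asoc, used
 * to size the reply for an exact-match (dump_one) request.
 */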
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return	  nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ 64;
}
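
/* Dump a single asoc for an exact-match (dump_one) request and
 * unicast the reply; returns -EAGAIN if the asoc was peeled off to
 * another ep between the lookup and lock_sock().
 */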
static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *req = commp->r;
	struct sk_buff *skb = commp->skb;
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		return err;

	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	lock_sock(sk);
	if (ep != assoc->ep) {
		err = -EAGAIN;
		goto out;
	}

	err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
				  NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
				  commp->nlh, commp->net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto out;
	}
	release_sock(sk);

	return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);

out:
	release_sock(sk);
	kfree_skb(rep);
	return err;
}
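
/* Dump all asocs of the ep owning this transport, resuming from the
 * positions kept in cb->args[]. The ep != tsp->asoc->ep check catches
 * an asoc that was peeled off before lock_sock() was taken, in which
 * case the sock must be skipped.
 */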
static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	if (ep != tsp->asoc->ep)
		goto release;
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
}
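
/* Decide whether the traversal should process this transport: accept
 * only transports of the ep's first asoc, so each ep is visited once,
 * and apply the address-family filter from the request.
 */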
static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc =
		list_entry(ep->asocs.next, struct sctp_association, asocs);

	/* visit the ep only once while walking the transports */
	if (tsp->asoc != assoc)
		return 0;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		return 0;

	return 1;
}
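
/* Dump one ep from the ep hashtable. An ep that already has asocs is
 * skipped unless only listening sockets were requested, since it will
 * also be found through the transport hashtable and would otherwise
 * show up twice.
 */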
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh, commp->net_admin) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
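/* Fill rqueue/wqueue and, when a buffer was reserved, the sctp_info
 * block, for either an asoc or a listening sock.
 */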
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}
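
/* Handle an exact-match request: rebuild the local and peer addresses
 * from the request and dump the matching transport's asoc via
 * sctp_sock_dump_one().
 */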
static int sctp_diag_dump_one(struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *req)
{
	struct sk_buff *skb = cb->skb;
	struct net *net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	union sctp_addr laddr, paddr;
	struct sctp_comm_param commp = {
		.skb = skb,
		.r = req,
		.nlh = nlh,
		.net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

	return sctp_transport_lookup_process(sctp_sock_dump_one,
					     net, &laddr, &paddr, &commp);
}
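
/* Walk both hashtables: the ep hash for listening socks and the
 * transport hash for asocs, saving the traversal positions in
 * cb->args[] so an interrupted netlink dump can resume.
 */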
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
	};
	int pos = cb->args[2];

	/* eps hashtable dumps
	 * args:
	 * 0 : whether the listen socks have been traversed
	 * 1 : to record the sock pos of this time's traversal
	 * 4 : to work as a temporary variable to traverse the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* asocs by transport hashtable dump
	 * args:
	 * 1 : to record the assoc pos of this time's traversal
	 * 2 : to record the transport pos of this time's traversal
	 * 3 : to mark if we have dumped the ep info of the current asoc
	 * 4 : to work as a temporary variable to traverse the list
	 * 5 : to save the sk we get from traversing the tsp list.
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
					net, &pos, &commp);
	cb->args[2] = pos;

done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info	 = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);