// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 Syncookies implementation for the Linux kernel
 *
 * Authors:
 * Glenn Griffin <ggriffin.kernel@gmail.com>
 *
 * Based on IPv4 implementation by Andi Kleen
 * linux/net/ipv4/syncookies.c
 */

#include <linux/tcp.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <net/secure_seq.h>
#include <net/ipv6.h>
#include <net/tcp.h>

#define COOKIEBITS 24	/* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

static siphash_key_t syncookie6_secret[2] __read_mostly;

/* RFC 2460, Section 8.3:
 * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
 *
 * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
 * using higher values than ipv4 tcp syncookies.
 * The other values are chosen based on ethernet (1500 and 9k MTU), plus
 * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
 */
static __u16 const msstab[] = {
	1280 - 60, /* IPV6_MIN_MTU - 60 */
	1480 - 60,
	1500 - 60,
	9000 - 60,
};
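
/* Only the index into msstab[] is stored in the cookie, not the MSS
 * itself; __cookie_v6_check() maps the recovered index back to an MSS
 * value (or 0 if the index is out of range).
 */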

static u32 cookie_hash(const struct in6_addr *saddr,
		       const struct in6_addr *daddr,
		       __be16 sport, __be16 dport, u32 count, int c)
{
	const struct {
		struct in6_addr saddr;
		struct in6_addr daddr;
		u32 count;
		__be16 sport;
		__be16 dport;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.saddr = *saddr,
		.daddr = *daddr,
		.count = count,
		.sport = sport,
		.dport = dport
	};

	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
	return siphash(&combined, offsetofend(typeof(combined), dport),
		       &syncookie6_secret[c]);
}
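
/*
 * Cookie layout (same scheme as the IPv4 implementation this file is
 * based on):
 *
 *	cookie = hash1(saddr, daddr, sport, dport) + sseq
 *		 + (count << COOKIEBITS)
 *		 + ((hash2(saddr, daddr, sport, dport, count) + data)
 *		    & COOKIEMASK)
 *
 * where hash1/hash2 are the two keyed siphashes from cookie_hash(),
 * "count" is the coarse timestamp from tcp_cookie_time() and "data" is
 * the msstab[] index.  check_tcp_syn_cookie() inverts this to recover
 * count (upper bits) and data (lower COOKIEBITS bits).
 */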
static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __be16 sport, __be16 dport, __u32 sseq,
				   __u32 data)
{
	u32 count = tcp_cookie_time();
	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		& COOKIEMASK));
}
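
/*
 * Undo secure_tcp_syn_cookie(): strip hash1 + sseq, read the time
 * counter from the upper bits and reject the cookie once
 * MAX_SYNCOOKIE_AGE or more tcp_cookie_time() periods have elapsed,
 * then strip hash2 to get back what should be the msstab[] index.
 * A corrupted cookie almost always decodes to an out-of-range index,
 * which the caller treats as invalid.
 */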
static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
				  const struct in6_addr *daddr, __be16 sport,
				  __be16 dport, __u32 sseq)
{
	__u32 diff, count = tcp_cookie_time();

	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (__u32)-1;

	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;
}
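
/*
 * Pick the largest msstab[] entry that does not exceed the MSS
 * advertised in the SYN (index 0 is used as the floor), report that
 * clamped value back through *mssp and fold its index into the cookie
 * as "data".
 */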
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, __u16 *mssp)
{
	int mssind;
	const __u16 mss = *mssp;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
		if (mss >= msstab[mssind])
			break;

	*mssp = msstab[mssind];

	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
				     th->dest, ntohl(th->seq), mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);

__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	return __cookie_v6_init_sequence(iph, th, mssp);
}
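
/*
 * Returns the MSS encoded in a cookie carried by the peer's final ACK,
 * or 0 if the cookie is stale or was not generated by us.  th->seq - 1
 * is the client ISN the cookie was originally computed against.
 */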
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      __u32 cookie)
{
	__u32 seq = ntohl(th->seq) - 1;
	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
					    th->source, th->dest, seq);

	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
EXPORT_SYMBOL_GPL(__cookie_v6_check);

struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tcp_opt;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;
	struct sock *ret = sk;
	struct request_sock *req;
	int full_space, mss;
	struct dst_entry *dst;
	__u8 rcv_wscale;
	u32 tsoff = 0;
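
	/*
	 * The peer echoes our cookie in ack_seq - 1.  Only try to decode it
	 * if syncookies are enabled, this segment is an ACK (not a reset)
	 * and tcp_synq_no_recent_overflow() says cookies may recently have
	 * been sent for this listener.
	 */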
	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

	if (tcp_synq_no_recent_overflow(sk))
		goto out;

	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
	if (mss == 0) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
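
	/*
	 * No per-connection state was kept, so the remaining negotiated
	 * options have to come from the ACK as well: window scale, SACK
	 * and ECN bits were packed into the low bits of the timestamp we
	 * sent, which the peer echoes back and cookie_timestamp_decode()
	 * unpacks below.
	 */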

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);

	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
		tsoff = secure_tcpv6_ts_off(sock_net(sk),
					    ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32);
		tcp_opt.rcv_tsecr -= tsoff;
	}

	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
		goto out;
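
	/*
	 * The cookie checks out: rebuild the request_sock that was never
	 * stored, using only this ACK and the values decoded above.
	 */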
	ret = NULL;
	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
	if (!req)
		goto out;

	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->tfo_listener = false;

	if (security_inet_conn_request(sk, skb, req))
		goto out_free;

	req->mss = mss;
	ireq->ir_rmt_port = th->source;
	ireq->ir_num = ntohs(th->dest);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}

	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	ireq->ir_mark = inet_request_mark(sk, skb);

	req->num_retrans = 0;
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
	ireq->wscale_ok = tcp_opt.wscale_ok;
	ireq->tstamp_ok = tcp_opt.saw_tstamp;
	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack = 0;
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;
	treq->ts_off = 0;
	treq->txhash = net_tx_rndhash();
	if (IS_ENABLED(CONFIG_SMC))
		ireq->smc_ok = 0;

	/*
	 * We need to look up the dst_entry to get the correct window size.
	 * This is taken from tcp_v6_syn_recv_sock.  Somebody please enlighten
	 * me if there is a preferred way.
	 */
	{
		struct in6_addr *final_p, final;
		struct flowi6 fl6;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = ireq->ir_iif;
		fl6.flowi6_mark = ireq->ir_mark;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = inet_sk(sk)->inet_sport;
		fl6.flowi6_uid = sk->sk_uid;
		security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));

		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out_free;
	}

	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
	/* limit the window selection if the user enforces a smaller rx buffer */
	full_space = tcp_full_space(sk);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	tcp_select_initial_window(sk, full_space, req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;
	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
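
	/*
	 * tcp_get_cookie_sock() creates the child socket from the
	 * reconstructed request and routes it via dst, much as if the
	 * request had been waiting in the SYN queue all along.
	 */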
	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
out:
	return ret;
out_free:
	reqsk_free(req);
	return NULL;
}