tcp: pause Fast Open globally after third consecutive timeout
Prior to this patch, active Fast Open is paused on a specific destination IP address if previous connections to that IP address have experienced recurring timeouts. But recent experiments by Microsoft (https://goo.gl/cykmn7) and Mozilla browsers indicate the issue is often caused by broken middle-boxes sitting close to the client. Therefore it gives a much better user experience to disable Fast Open outright and globally, to avoid further timeouts on connections toward other destinations.

This patch changes the per-destination-IP disablement to a global disablement when a connection experiences recurring timeouts, or aborts due to a timeout. Repeated incidents still exponentially increase the pause time, starting from an hour. This is extremely conservative but an unfortunate compromise to minimize bad experience due to broken middle-boxes.

Reported-by: Dragana Damjanovic <ddamjanovic@mozilla.com>
Reported-by: Patrick McManus <mcmanus@ducksong.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Reviewed-by: Wei Wang <weiwan@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 8a83c5d796
Commit: 7268586baa
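For illustration, below is a rough user-space C sketch of the pause/backoff policy described in the commit message. It is not kernel code; the function name and the cap on the number of doublings are assumptions of this sketch, not part of the patch. The first incident pauses active Fast Open for one hour, and each repeated incident doubles the pause.

#include <stdio.h>

/* Illustrative only: models the "pause for 1h, then 2h, 4h, 8h, ..." policy
 * from the commit message. The cap of six doublings is an assumption made
 * for this sketch, not something specified by the patch.
 */
static unsigned long fastopen_pause_secs(unsigned int incidents)
{
	const unsigned long base = 60UL * 60UL;	/* initial pause: one hour */

	if (incidents == 0)
		return 0;	/* no blackhole incidents: Fast Open not paused */
	return base << (incidents - 1 > 6 ? 6 : incidents - 1);
}

int main(void)
{
	for (unsigned int i = 0; i <= 4; i++)
		printf("incidents=%u -> pause=%lus\n", i, fastopen_pause_secs(i));
	return 0;
}

In the kernel itself the bookkeeping is done by tcp_fastopen_active_disable() and tcp_fastopen_active_should_disable(), which appear in the hunks below.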
@@ -606,6 +606,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
 	This time period will grow exponentially when more blackhole issues
 	get detected right after Fastopen is re-enabled and will reset to
 	initial value when the blackhole issue goes away.
+	0 to disable the blackhole detection.
 	By default, it is set to 1hr.
 
 tcp_syn_retries - INTEGER
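The knob documented in the hunk above is exposed under /proc/sys/net/ipv4/. As a hedged example (assuming a Linux host where this sysctl is present), a minimal program that reads the current pause window follows; writing 0 to the same file disables the blackhole detection, as the documentation states.

#include <stdio.h>

/* Read the current Fast Open blackhole pause window in seconds. Writing "0"
 * to the same procfs file disables the detection, per the documentation.
 */
int main(void)
{
	const char *path =
		"/proc/sys/net/ipv4/tcp_fastopen_blackhole_timeout_sec";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("tcp_fastopen_blackhole_timeout_sec = %s", buf);
	fclose(f);
	return 0;
}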
@@ -1507,8 +1507,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
-			    unsigned long *last_syn_loss);
+			    struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 			    u16 try_exp);
@@ -1546,7 +1545,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
@@ -379,18 +379,9 @@ fastopen:
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
 			       struct tcp_fastopen_cookie *cookie)
 {
-	unsigned long last_syn_loss = 0;
 	const struct dst_entry *dst;
-	int syn_loss = 0;
 
-	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
-	/* Recurring FO SYN losses: no cookie or data in SYN */
-	if (syn_loss > 1 &&
-	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-		cookie->len = -1;
-		return false;
-	}
+	tcp_fastopen_cache_get(sk, mss, cookie);
 
 	/* Firewall blackhole issue check */
 	if (tcp_fastopen_active_should_disable(sk)) {
@@ -448,6 +439,8 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
  * following circumstances:
  *  1. client side TFO socket receives out of order FIN
  *  2. client side TFO socket receives out of order RST
+ *  3. client side TFO socket has timed out three times consecutively during
+ *     or after handshake
  * We disable active side TFO globally for 1hr at first. Then if it
  * happens again, we disable it for 2h, then 4h, 8h, ...
  * And we reset the timeout back to 1hr when we see a successful active
@@ -524,3 +517,20 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 		dst_release(dst);
 	}
 }
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+	u32 timeouts = inet_csk(sk)->icsk_retransmits;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Broken middle-boxes may black-hole Fast Open connection during or
+	 * even after the handshake. Be extremely conservative and pause
+	 * Fast Open globally after hitting the third consecutive timeout or
+	 * exceeding the configured timeout limit.
+	 */
+	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+	    (timeouts == 2 || (timeouts < 2 && expired))) {
+		tcp_fastopen_active_disable(sk);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+	}
+}
@@ -546,8 +546,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie,
-			    int *syn_loss, unsigned long *last_syn_loss)
+			    struct tcp_fastopen_cookie *cookie)
 {
 	struct tcp_metrics_block *tm;
 
@@ -564,8 +563,6 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 			*cookie = tfom->cookie;
 			if (cookie->len <= 0 && tfom->try_exp == 1)
 				cookie->exp = true;
-			*syn_loss = tfom->syn_loss;
-			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
 		} while (read_seqretry(&fastopen_seqlock, seq));
 	}
 	rcu_read_unlock();
@@ -183,11 +183,6 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-			if (tp->syn_fastopen || tp->syn_data)
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS(sock_net(sk),
-					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		} else if (!tp->syn_data && !tp->syn_fastopen) {
 			sk_rethink_txhash(sk);
 		}
@@ -195,17 +190,6 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
 		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
-			/* Some middle-boxes may black-hole Fast Open _after_
-			 * the handshake. Therefore we conservatively disable
-			 * Fast Open on this path on recurring timeouts after
-			 * successful Fast Open.
-			 */
-			if (tp->syn_data_acked) {
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS(sock_net(sk),
-						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
-			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -228,6 +212,7 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = retransmits_timed_out(sk, retry_until,
 						icsk->icsk_user_timeout);
 	}
+	tcp_fastopen_active_detect_blackhole(sk, expired);
 	if (expired) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);