net-tcp: Disable TCP ssthresh metrics cache by default
This patch introduces a sysctl knob "net.ipv4.tcp_no_ssthresh_metrics_save" that disables the TCP ssthresh metrics cache by default. Other parts of the TCP metrics cache, e.g. rtt and cwnd, remain unchanged. As modern networks become more and more dynamic, the TCP metrics cache today often causes more harm than benefit. For example, the same IP address is often shared by different subscribers behind NAT in residential networks. Even if the IP address is not shared by different users, caching the slow-start threshold of a previous short flow using loss-based congestion control (e.g. cubic) often causes future, longer flows on the same network path to exit slow start prematurely with abysmal throughput. Caching ssthresh is very risky and can lead to terrible performance. Therefore it makes sense to disable ssthresh caching by default and let administrators opt in for specific networks. This practice has also worked well for several years of deployment with CUBIC congestion control at Google. Acked-by: Eric Dumazet <edumazet@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Acked-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Kevin(Yudong) Yang <yyd@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Родитель
4e7696d90b
Коммит
65e6d90168
|
@ -479,6 +479,10 @@ tcp_no_metrics_save - BOOLEAN
|
|||
degradation. If set, TCP will not cache metrics on closing
|
||||
connections.
|
||||
|
||||
tcp_no_ssthresh_metrics_save - BOOLEAN
|
||||
Controls whether TCP saves ssthresh metrics in the route cache.
|
||||
Default is 1, which disables ssthresh metrics.
|
||||
|
||||
tcp_orphan_retries - INTEGER
|
||||
This value influences the timeout of a locally closed TCP connection,
|
||||
when RTO retransmissions remain unacknowledged.
|
||||
|
|
|
@ -154,6 +154,7 @@ struct netns_ipv4 {
|
|||
int sysctl_tcp_adv_win_scale;
|
||||
int sysctl_tcp_frto;
|
||||
int sysctl_tcp_nometrics_save;
|
||||
int sysctl_tcp_no_ssthresh_metrics_save;
|
||||
int sysctl_tcp_moderate_rcvbuf;
|
||||
int sysctl_tcp_tso_win_divisor;
|
||||
int sysctl_tcp_workaround_signed_windows;
|
||||
|
|
|
@ -1192,6 +1192,15 @@ static struct ctl_table ipv4_net_table[] = {
|
|||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_no_ssthresh_metrics_save",
|
||||
.data = &init_net.ipv4.sysctl_tcp_no_ssthresh_metrics_save,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = SYSCTL_ZERO,
|
||||
.extra2 = SYSCTL_ONE,
|
||||
},
|
||||
{
|
||||
.procname = "tcp_moderate_rcvbuf",
|
||||
.data = &init_net.ipv4.sysctl_tcp_moderate_rcvbuf,
|
||||
|
|
|
@ -2674,6 +2674,7 @@ static int __net_init tcp_sk_init(struct net *net)
|
|||
net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
|
||||
net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
|
||||
net->ipv4.sysctl_tcp_tw_reuse = 2;
|
||||
net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
|
||||
|
||||
cnt = tcp_hashinfo.ehash_mask + 1;
|
||||
net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
|
||||
|
|
|
@ -385,7 +385,8 @@ void tcp_update_metrics(struct sock *sk)
|
|||
|
||||
if (tcp_in_initial_slowstart(tp)) {
|
||||
/* Slow start still did not finish. */
|
||||
if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
|
||||
if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
|
||||
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
|
||||
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
|
||||
if (val && (tp->snd_cwnd >> 1) > val)
|
||||
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
|
||||
|
@ -400,7 +401,8 @@ void tcp_update_metrics(struct sock *sk)
|
|||
} else if (!tcp_in_slow_start(tp) &&
|
||||
icsk->icsk_ca_state == TCP_CA_Open) {
|
||||
/* Cong. avoidance phase, cwnd is reliable. */
|
||||
if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
|
||||
if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
|
||||
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
|
||||
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
|
||||
max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
|
||||
if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
|
||||
|
@ -416,7 +418,8 @@ void tcp_update_metrics(struct sock *sk)
|
|||
tcp_metric_set(tm, TCP_METRIC_CWND,
|
||||
(val + tp->snd_ssthresh) >> 1);
|
||||
}
|
||||
if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
|
||||
if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
|
||||
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
|
||||
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
|
||||
if (val && tp->snd_ssthresh > val)
|
||||
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
|
||||
|
@ -441,6 +444,7 @@ void tcp_init_metrics(struct sock *sk)
|
|||
{
|
||||
struct dst_entry *dst = __sk_dst_get(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct net *net = sock_net(sk);
|
||||
struct tcp_metrics_block *tm;
|
||||
u32 val, crtt = 0; /* cached RTT scaled by 8 */
|
||||
|
||||
|
@ -458,7 +462,8 @@ void tcp_init_metrics(struct sock *sk)
|
|||
if (tcp_metric_locked(tm, TCP_METRIC_CWND))
|
||||
tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
|
||||
|
||||
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
|
||||
val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
|
||||
0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
|
||||
if (val) {
|
||||
tp->snd_ssthresh = val;
|
||||
if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
|
||||
|
|
Загрузка…
Ссылка в новой задаче