tcp: fix reordering SNMP under-counting
Currently the reordering SNMP counters only increase if a connection sees a higher degree of reordering than it has previously seen; they are not incremented when the reordering degree does not exceed the default system threshold. This significantly under-counts the number of reordering events and falsely conveys that reordering is rare on the network. This patch properly and faithfully records the number of reordering events detected by the TCP stack, just like the comment says: "this exciting event is worth to be remembered". Note that even so, TCP still under-estimates the actual number of reordering events, because TCP requires TS options or certain packet sequences to detect reordering (i.e. ACKing a never-retransmitted sequence in recovery or disordered state). Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Родитель
ecde8f36f8
Коммит
2d2517ee31
|
@ -878,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
|
||||||
const int ts)
|
const int ts)
|
||||||
{
|
{
|
||||||
struct tcp_sock *tp = tcp_sk(sk);
|
struct tcp_sock *tp = tcp_sk(sk);
|
||||||
if (metric > tp->reordering) {
|
|
||||||
int mib_idx;
|
int mib_idx;
|
||||||
|
|
||||||
|
if (metric > tp->reordering) {
|
||||||
tp->reordering = min(sysctl_tcp_max_reordering, metric);
|
tp->reordering = min(sysctl_tcp_max_reordering, metric);
|
||||||
|
|
||||||
/* This exciting event is worth to be remembered. 8) */
|
|
||||||
if (ts)
|
|
||||||
mib_idx = LINUX_MIB_TCPTSREORDER;
|
|
||||||
else if (tcp_is_reno(tp))
|
|
||||||
mib_idx = LINUX_MIB_TCPRENOREORDER;
|
|
||||||
else if (tcp_is_fack(tp))
|
|
||||||
mib_idx = LINUX_MIB_TCPFACKREORDER;
|
|
||||||
else
|
|
||||||
mib_idx = LINUX_MIB_TCPSACKREORDER;
|
|
||||||
|
|
||||||
NET_INC_STATS(sock_net(sk), mib_idx);
|
|
||||||
#if FASTRETRANS_DEBUG > 1
|
#if FASTRETRANS_DEBUG > 1
|
||||||
pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
|
pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
|
||||||
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
|
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
|
||||||
|
@ -906,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
|
||||||
}
|
}
|
||||||
|
|
||||||
tp->rack.reord = 1;
|
tp->rack.reord = 1;
|
||||||
|
|
||||||
|
/* This exciting event is worth to be remembered. 8) */
|
||||||
|
if (ts)
|
||||||
|
mib_idx = LINUX_MIB_TCPTSREORDER;
|
||||||
|
else if (tcp_is_reno(tp))
|
||||||
|
mib_idx = LINUX_MIB_TCPRENOREORDER;
|
||||||
|
else if (tcp_is_fack(tp))
|
||||||
|
mib_idx = LINUX_MIB_TCPFACKREORDER;
|
||||||
|
else
|
||||||
|
mib_idx = LINUX_MIB_TCPSACKREORDER;
|
||||||
|
|
||||||
|
NET_INC_STATS(sock_net(sk), mib_idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This must be called before lost_out is incremented */
|
/* This must be called before lost_out is incremented */
|
||||||
|
|
Загрузка…
Ссылка в новой задаче