[TCP]: remove unused argument to cong_avoid op
None of the existing TCP congestion controls use the rtt value passed in via the ca_ops->cong_avoid interface, which is lucky because seq_rtt could have been -1 when handling a duplicate ack.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 44beac0086
Commit: 16751347a0
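For illustration only (not part of this commit): a minimal sketch of an out-of-tree congestion control module written against the updated ops table, where cong_avoid no longer receives an rtt argument. The module name "sketch" is hypothetical; the helpers it reuses (tcp_reno_cong_avoid, tcp_reno_ssthresh, tcp_reno_min_cwnd) and the registration calls (tcp_register_congestion_control, tcp_unregister_congestion_control) are the existing kernel APIs declared in include/net/tcp.h.

#include <linux/module.h>
#include <net/tcp.h>

/* New four-argument prototype: (sk, ack, in_flight, good_ack) -- no rtt. */
static void sketch_cong_avoid(struct sock *sk, u32 ack,
			      u32 in_flight, int good_ack)
{
	/* Defer to Reno; an rtt value is no longer available here. */
	tcp_reno_cong_avoid(sk, ack, in_flight, good_ack);
}

static struct tcp_congestion_ops tcp_sketch = {
	.name		= "sketch",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= sketch_cong_avoid,
};

static int __init tcp_sketch_register(void)
{
	return tcp_register_congestion_control(&tcp_sketch);
}

static void __exit tcp_sketch_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_sketch);
}

module_init(tcp_sketch_register);
module_exit(tcp_sketch_unregister);
MODULE_LICENSE("GPL");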
@@ -652,8 +652,7 @@ struct tcp_congestion_ops {
 	/* lower bound for congestion window (optional) */
 	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack,
-			   u32 rtt, u32 in_flight, int good_ack);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -684,8 +683,7 @@ extern void tcp_slow_start(struct tcp_sock *tp);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
-				u32 rtt, u32 in_flight, int flag);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag);
 extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
@@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			 int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
-			      u32 seq_rtt, u32 in_flight, int data_acked)
+			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
 			     u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack,
 		    u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
 			    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 		return;
 
 	if (!ca->hybla_en)
-		return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	if (ca->rho == 0)
 		hybla_recalc_param(sk);
@@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
 		tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
 			   u32 in_flight, int good)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
-			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+			tcp_cong_avoid(sk, ack, prior_in_flight, 1);
 	}
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
-			      int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 }
 
 /**
@@ -15,7 +15,7 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
 				    u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
-				 u32 seq_rtt, u32 in_flight, int flag)
+				 u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
 	 *
@@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 	} else {
 		u32 rtt, target_cwnd, diff;
 
@@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now)
-		return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 
 	/* limited by applications */
 	if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+		tcp_reno_cong_avoid(sk, ack, in_flight, flag);
 	} else {
 		u32 rtt, target_cwnd;
 
@@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
-				u32 seq_rtt, u32 in_flight, int flag)
+				u32 in_flight, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);