tcp: introduce tcp_under_memory_pressure()
Introduce an optimized version of sk_under_memory_pressure() for TCP.
Our intent is to use it in fast paths.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: a6c5ea4ccf
Commit: b8da51ebb1
include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!sk->sk_cgrp->memory_pressure;
+
+	return tcp_memory_pressure;
+}
 /*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
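For context: the generic sk_under_memory_pressure() that the call sites below stop using must first check whether the protocol tracks memory pressure at all, then chase a per-protocol pointer. The sketch below paraphrases its shape from include/net/sock.h of this era; it is an illustration, not verbatim kernel source, and details may differ between versions.

	/* Generic helper (paraphrased sketch, not verbatim kernel source):
	 * a protocol may not track memory pressure at all, so the generic
	 * path tests and then dereferences a per-protocol pointer.
	 */
	static inline bool sk_under_memory_pressure(const struct sock *sk)
	{
		if (!sk->sk_prot->memory_pressure)	/* not tracked */
			return false;

		if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
			return !!sk->sk_cgrp->memory_pressure;

		return !!*sk->sk_prot->memory_pressure;	/* extra indirection */
	}

tcp_under_memory_pressure() skips the sk->sk_prot->memory_pressure test and indirection and reads the global tcp_memory_pressure directly, which is what makes it cheaper in the fast paths converted below.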
net/ipv4/tcp_input.c
@@ -359,7 +359,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !sk_under_memory_pressure(sk)) {
+	    !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@ static void tcp_clamp_window(struct sock *sk)
 
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !sk_under_memory_pressure(sk) &&
+	    !tcp_under_memory_pressure(sk) &&
 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
@@ -4781,7 +4781,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (sk_under_memory_pressure(sk))
+	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
@@ -4825,7 +4825,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we are under global TCP memory pressure, do not expand.  */
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
net/ipv4/tcp_output.c
@@ -2392,7 +2392,7 @@ u32 __tcp_select_window(struct sock *sk)
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (sk_under_memory_pressure(sk))
+		if (tcp_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
@@ -2843,7 +2843,7 @@ void tcp_send_fin(struct sock *sk)
 	 * Note: in the latter case, FIN packet will be sent after a timeout,
 	 * as TCP stack thinks it has already been transmitted.
 	 */
-	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+	if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
 coalesce:
 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(tskb)->end_seq++;
net/ipv4/tcp_timer.c
@@ -247,7 +247,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 	}
 
 out:
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
 }
 