tcp: do not set queue_mapping on SYNACK
At the time of commit fff3269907 ("tcp: reflect SYN queue_mapping into
SYNACK packets") we had few ways to cope with SYN floods.

We no longer need to reflect the incoming skb queue mapping; instead we
can pick a TX queue based on the cpu cooking the SYNACK, with normal
XPS affinities.

Note that all SYNACK retransmits were picking TX queue 0; reflecting
the queue mapping is no longer a win, given that SYNACK retransmits are
now distributed across all cpus.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 951b5d959f
Commit: dc6ef6be52
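For readers unfamiliar with the mechanism the commit message leans on: XPS
(Transmit Packet Steering) keeps a per-device map from CPU to a set of TX
queues, so a packet built on a given CPU is sent on one of that CPU's queues,
falling back to a plain flow-hash spread otherwise. A minimal user-space
sketch of that idea follows; the names `xps_map`, `pick_tx_queue` and `scale`
and the fixed-size layout are illustrative, not the kernel's definitions.

```c
/*
 * Illustrative sketch only -- not the kernel's implementation. XPS maps
 * each CPU to a subset of TX queues; the sender picks among the queues
 * mapped to the CPU building the packet, else spreads by flow hash.
 */
#include <stdint.h>
#include <stdio.h>

struct xps_map {
	unsigned int len;   /* number of TX queues mapped to this CPU */
	uint16_t queues[4]; /* TX queue ids (fixed size here for brevity) */
};

/* Multiply-shift scaling of a hash onto [0, n), the same trick as the
 * kernel's reciprocal_scale() helper. */
static uint16_t scale(uint32_t hash, uint32_t n)
{
	return (uint16_t)(((uint64_t)hash * n) >> 32);
}

/* Pick a TX queue for the CPU that is cooking the packet. */
static uint16_t pick_tx_queue(const struct xps_map *map, uint32_t flow_hash,
			      uint16_t num_tx_queues)
{
	if (map && map->len)	/* honour this CPU's XPS affinity */
		return map->queues[map->len == 1 ? 0 :
				   scale(flow_hash, map->len)];

	/* no XPS entry for this CPU: spread flows across all queues */
	return scale(flow_hash, num_tx_queues);
}

int main(void)
{
	/* CPU 0 is affined to queues 0 and 1; CPU 1 has no XPS entry. */
	struct xps_map cpu0 = { .len = 2, .queues = { 0, 1 } };

	printf("cpu0 -> queue %u\n", pick_tx_queue(&cpu0, 0xdeadbeefu, 8));
	printf("cpu1 -> queue %u\n", pick_tx_queue(NULL, 0xdeadbeefu, 8));
	return 0;
}
```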
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1716,7 +1716,7 @@ struct tcp_request_sock_ops {
 	__u32 (*init_seq)(const struct sk_buff *skb);
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
 			   struct flowi *fl, struct request_sock *req,
-			   u16 queue_mapping, struct tcp_fastopen_cookie *foc,
+			   struct tcp_fastopen_cookie *foc,
 			   bool attach_req);
 };
 
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1596,7 +1596,6 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
-		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
 out:
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6236,7 +6236,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	}
 	if (fastopen_sk) {
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
-				    skb_get_queue_mapping(skb), &foc, false);
+				    &foc, false);
 		/* Add the child socket directly into the accept queue */
 		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
 		sk->sk_data_ready(sk);
@@ -6247,7 +6247,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (!want_cookie)
 			inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 		af_ops->send_synack(sk, dst, &fl, req,
-				    skb_get_queue_mapping(skb), &foc, !want_cookie);
+				    &foc, !want_cookie);
 		if (want_cookie)
 			goto drop_and_free;
 	}
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -821,7 +821,6 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
-			      u16 queue_mapping,
 			      struct tcp_fastopen_cookie *foc,
 			      bool attach_req)
 {
@@ -839,7 +838,6 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
 					    ireq->opt);
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3518,7 +3518,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	int res;
 
 	tcp_rsk(req)->txhash = net_tx_rndhash();
-	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL, true);
+	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
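The tcp_rtx_synack() hunk above is where retransmits used to be pinned: the
hard-coded `0` queue argument is gone, and the per-request random hash from
net_tx_rndhash() (already assigned in the context line) now feeds ordinary
hash-based queue selection. A rough sketch of that hash-to-queue step, using
the same multiply-shift trick as the kernel's reciprocal_scale(); the helper
name `hash_to_txq` and the sample hashes are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash uniformly onto [0, num_queues) without a modulo,
 * the multiply-and-shift trick used by reciprocal_scale(). */
static uint16_t hash_to_txq(uint32_t txhash, uint16_t num_queues)
{
	return (uint16_t)(((uint64_t)txhash * num_queues) >> 32);
}

int main(void)
{
	/* Three hypothetical per-request random hashes: each request keeps
	 * its own hash across retransmits, so different requests land on
	 * different queues instead of all hitting queue 0. */
	uint32_t hashes[] = { 0x12345678u, 0x9abcdef0u, 0xdeadbeefu };

	for (int i = 0; i < 3; i++)
		printf("txhash %#x -> queue %u\n",
		       hashes[i], hash_to_txq(hashes[i], 8));
	return 0;
}
```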
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -437,7 +437,6 @@ out:
 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
-			      u16 queue_mapping,
 			      struct tcp_fastopen_cookie *foc,
 			      bool attach_req)
 {
@@ -462,7 +461,6 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
 	}