net_sched: fq: take care of throttled flows before reuse
[ Upstream commit 7df40c2673 ]

Normally, a socket can not be freed/reused unless all its TX packets
left qdisc and were TX-completed. However connect(AF_UNSPEC) allows
this to happen.

With commit fc59d5bdf1 ("pkt_sched: fq: clear time_next_packet for
reused flows") we cleared f->time_next_packet but took no special
action if the flow was still in the throttled rb-tree.

Since f->time_next_packet is the key used in the rb-tree searches,
blindly clearing it might break rb-tree integrity. We need to make
sure the flow is no longer in the rb-tree to avoid this problem.

Fixes: fc59d5bdf1 ("pkt_sched: fq: clear time_next_packet for reused flows")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 6a5b0444e7
Commit: b2a4d52fae
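The connect(AF_UNSPEC) scenario from the changelog can be reproduced from userspace. Below is a minimal, illustrative sketch (hypothetical address and port, error handling omitted, not part of the patch) of a connected UDP socket sending a datagram and then being disconnected, after which the socket can be reused while its earlier skb may still sit in the fq qdisc:

/* Illustrative only: shows the connect(AF_UNSPEC) disconnect that lets a
 * socket be reused before its earlier packets have left the qdisc. */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst;
	struct sockaddr unspec;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(9);		/* arbitrary port for the sketch */
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	send(fd, "x", 1, 0);			/* this skb may still be queued in fq */

	/* Disconnect: the socket becomes reusable immediately, even though
	 * the qdisc may still hold a (possibly throttled) flow for it. */
	memset(&unspec, 0, sizeof(unspec));
	unspec.sa_family = AF_UNSPEC;
	connect(fd, &unspec, sizeof(unspec));

	close(fd);
	return 0;
}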
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 			     f->socket_hash != sk->sk_hash)) {
 				f->credit = q->initial_quantum;
 				f->socket_hash = sk->sk_hash;
+				if (fq_flow_is_throttled(f))
+					fq_flow_unset_throttled(q, f);
 				f->time_next_packet = 0ULL;
 			}
 			return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 
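As background for why the key must not be rewritten in place: the q->delayed rb-tree is ordered by f->time_next_packet, so a node whose key is cleared while it is still linked violates the ordering that later searches and insertions rely on. Below is a self-contained userspace analogy (hypothetical struct and function names; a sorted array stands in for the rb-tree) of that failure mode:

/* Illustrative analogy only, not kernel code: "key" plays the role of
 * f->time_next_packet and the sorted array plays the role of q->delayed.
 * Clearing the key of an element that is still inside the sorted structure
 * breaks the ordering invariant that lookups depend on. */
#include <stdio.h>

struct flow {
	unsigned long long key;	/* stands in for time_next_packet */
	int id;
};

/* Ordinary binary search; it assumes the array is sorted by key, just as
 * rb-tree descents assume every linked node kept the key it was inserted with. */
static int find(const struct flow *v, int n, unsigned long long key)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (v[mid].key == key)
			return mid;
		if (v[mid].key < key)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return -1;
}

int main(void)
{
	struct flow flows[] = { {100, 1}, {200, 2}, {300, 3} };

	printf("before clearing: index %d\n", find(flows, 3, 200));

	/* Analogue of zeroing time_next_packet while the flow is still in the
	 * throttled tree: the element is still present, but the ordering
	 * invariant is broken, so the descent goes the wrong way and misses it. */
	flows[1].key = 0;
	printf("after clearing:  index %d\n", find(flows, 3, 200));
	return 0;
}

The patch sidesteps this by having fq_classify() call the new fq_flow_unset_throttled() helper, which rb_erase()s the flow from q->delayed before f->time_next_packet is cleared.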