fq_codel: Avoid regenerating skb flow hash unless necessary
The fq_codel qdisc currently always regenerates the skb flow hash. This wastes some cycles and prevents flow separation in cases where the traffic has been encrypted and can no longer be understood by the flow dissector.

Change it to use the preexisting flow hash if one exists, and only regenerate if necessary.

Signed-off-by: Andrew Collins <acollins@cradlepoint.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 1f6cc07e17
Commit: 264b87fa61
net/sched/sch_fq_codel.c

@@ -57,7 +57,6 @@ struct fq_codel_sched_data {
 	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 	u32		*backlogs;	/* backlog table [flows_cnt] */
 	u32		flows_cnt;	/* number of flows */
-	u32		perturbation;	/* hash perturbation */
 	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 	u32		drop_batch_size;
 	u32		memory_limit;
@@ -75,9 +74,7 @@ struct fq_codel_sched_data {
 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
 				  struct sk_buff *skb)
 {
-	u32 hash = skb_get_hash_perturb(skb, q->perturbation);
-
-	return reciprocal_scale(hash, q->flows_cnt);
+	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
 }
 
 static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -482,7 +479,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->memory_limit = 32 << 20; /* 32 MBytes */
 	q->drop_batch_size = 64;
 	q->quantum = psched_mtu(qdisc_dev(sch));
-	q->perturbation = prandom_u32();
 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
 	codel_params_init(&q->cparams);
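For context (not part of the patch): skb_get_hash() avoids the rehash because it only falls back to the flow dissector when the skb does not already carry a valid hash, e.g. one supplied by NIC RSS or computed earlier in the stack. A rough sketch of the upstream helper from include/linux/skbuff.h around the time of this change:

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	/* Run the flow dissector only if no L4 or software hash
	 * has been recorded on this skb yet.
	 */
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

skb_get_hash_perturb(), by contrast, always dissects the packet with the per-qdisc perturbation key and so cannot reuse a hardware-provided hash; giving up the per-qdisc perturbation is the trade-off this patch accepts for the cheaper, encryption-friendly path.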