[NET_SCHED]: Make HTB scheduler work with TSO.

Currently the HTB scheduler does not correctly account for TSO packets
which causes large inaccuracies in the bandwidth control when using TSO.
This patch allows the HTB scheduler to work with TSO enabled devices.

Signed-off-by: Ranjit Manomohan <ranjitm@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Ranjit Manomohan 2007-07-10 22:43:16 -07:00 committed by David S. Miller
Parent c6c6e3e05c
Commit c9726d6890
1 changed file with 10 additions and 10 deletions

View file

@ -129,15 +129,12 @@ struct htb_class {
/* of un.leaf originals should be done. */ /* of un.leaf originals should be done. */
}; };
/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate, static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
int size) int size)
{ {
int slot = size >> rate->rate.cell_log; int slot = size >> rate->rate.cell_log;
if (slot > 255) { if (slot > 255)
cl->xstats.giants++; return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
slot = 255;
}
return rate->data[slot]; return rate->data[slot];
} }
@ -606,13 +603,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->qstats.drops++; cl->qstats.drops++;
return NET_XMIT_DROP; return NET_XMIT_DROP;
} else { } else {
cl->bstats.packets++; cl->bstats.packets +=
skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
cl->bstats.bytes += skb->len; cl->bstats.bytes += skb->len;
htb_activate(q, cl); htb_activate(q, cl);
} }
sch->q.qlen++; sch->q.qlen++;
sch->bstats.packets++; sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
sch->bstats.bytes += skb->len; sch->bstats.bytes += skb->len;
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
@ -661,8 +659,9 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
* In such case we remove class from event queue first. * In such case we remove class from event queue first.
*/ */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
int level, int bytes) int level, struct sk_buff *skb)
{ {
int bytes = skb->len;
long toks, diff; long toks, diff;
enum htb_cmode old_mode; enum htb_cmode old_mode;
@ -698,7 +697,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
/* update byte stats except for leaves which are already updated */ /* update byte stats except for leaves which are already updated */
if (cl->level) { if (cl->level) {
cl->bstats.bytes += bytes; cl->bstats.bytes += bytes;
cl->bstats.packets++; cl->bstats.packets += skb_is_gso(skb)?
skb_shinfo(skb)->gso_segs:1;
} }
cl = cl->parent; cl = cl->parent;
} }
@ -882,7 +882,7 @@ next:
gives us slightly better performance */ gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen) if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl); htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb->len); htb_charge_class(q, cl, level, skb);
} }
return skb; return skb;
} }