author    Ranjit Manomohan <ranjitm@google.com>  2007-07-10 22:43:16 -0700
committer David S. Miller <davem@davemloft.net>  2007-07-10 22:43:16 -0700
commit    c9726d6890f7f3a892c879e067c3ed839f61e745 (patch)
tree      aaae7b138f7c409d62a1223742df38d79130acb4 /net/sched
parent    c6c6e3e05c0b4349824efcdd36650e7be9d5c7c3 (diff)
[NET_SCHED]: Make HTB scheduler work with TSO.
Currently the HTB scheduler does not correctly account for TSO packets which causes large inaccuracies in the bandwidth control when using TSO. This patch allows the HTB scheduler to work with TSO enabled devices.

Signed-off-by: Ranjit Manomohan <ranjitm@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
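For context: a single TSO skb stands for many on-wire segments, so treating it as one packet makes HTB's statistics and rate charging see far fewer, far larger packets than the device actually transmits. The patch therefore charges gso_segs packets per GSO skb. A minimal sketch of that idiom, assuming the usual <linux/skbuff.h> helpers (the wrapper name below is hypothetical, not part of the patch):

#include <linux/skbuff.h>

/* Illustrative helper only: how many on-wire packets does this skb
 * represent?  A GSO/TSO skb covers gso_segs segments; any other skb
 * is a single packet. */
static inline unsigned int skb_packet_count(const struct sk_buff *skb)
{
	return skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

The enqueue and charge paths in the diff below apply exactly this count when updating cl->bstats.packets and sch->bstats.packets.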
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_htb.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c031486b675..b417a95df32 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -129,15 +129,12 @@ struct htb_class {
/* of un.leaf originals should be done. */
};
-/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
int size)
{
int slot = size >> rate->rate.cell_log;
- if (slot > 255) {
- cl->xstats.giants++;
- slot = 255;
- }
+ if (slot > 255)
+ return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
return rate->data[slot];
}
@@ -606,13 +603,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->qstats.drops++;
return NET_XMIT_DROP;
} else {
- cl->bstats.packets++;
+ cl->bstats.packets +=
+ skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
cl->bstats.bytes += skb->len;
htb_activate(q, cl);
}
sch->q.qlen++;
- sch->bstats.packets++;
+ sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
sch->bstats.bytes += skb->len;
return NET_XMIT_SUCCESS;
}
@@ -661,8 +659,9 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
* In such case we remove class from event queue first.
*/
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
- int level, int bytes)
+ int level, struct sk_buff *skb)
{
+ int bytes = skb->len;
long toks, diff;
enum htb_cmode old_mode;
@@ -698,7 +697,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
/* update byte stats except for leaves which are already updated */
if (cl->level) {
cl->bstats.bytes += bytes;
- cl->bstats.packets++;
+ cl->bstats.packets += skb_is_gso(skb)?
+ skb_shinfo(skb)->gso_segs:1;
}
cl = cl->parent;
}
@@ -882,7 +882,7 @@ next:
gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
- htb_charge_class(q, cl, level, skb->len);
+ htb_charge_class(q, cl, level, skb);
}
return skb;
}
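The L2T() change in the first hunk also matters for TSO: instead of clamping oversized packets to the last rate-table slot (and counting them as giants), it composes the transmission time from whole 256-cell chunks plus a remainder, treating rate->data[255] as the cost of one full chunk. A rough worked example, with an assumed cell_log of 3 (the value is illustrative, not taken from the patch):

/* Assumed cell_log = 3, i.e. one rate-table cell per 8 bytes.
 * For a 65536-byte TSO frame:
 *
 *   slot = 65536 >> 3;                     // 8192, far above 255
 *   time = rate->data[255] * (slot >> 8)   // 32 chunks of 256 cells
 *        + rate->data[slot & 0xFF];        // remainder, here data[0]
 *
 * The old code clamped slot to 255, charging the class for roughly a
 * 2 KB packet and bumping the giants counter instead. */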