author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-08-25 06:21:32 +0000
committer | David S. Miller <davem@davemloft.net> | 2011-08-26 12:55:18 -0400
commit | 363437f40a23bacdead80bb80d08d8193a20cfce (patch)
tree | e5dce35d8aa3ad364dafc1274b3a4c469dbcdb88
parent | 18cf1248eca3f1fc38e12b314a6cadd286260e65 (diff)
net_sched: sfb: optimize enqueue on full queue
In case the SFB queue is full (hard limit reached), there is no point
spending time computing the hash and the maximum qlen/p_mark.
Instead, just drop the packet early.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
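
The change is easiest to see outside the kernel. Below is a minimal user-space C sketch of the same pattern, under the assumption of a toy queue structure; the names (toy_queue, expensive_classify, toy_enqueue) are illustrative only and are not part of sch_sfb.c. The point it shows is the one in the patch: test the hard limit before doing any per-packet classification work.

/*
 * Minimal sketch of the idea in this patch (not kernel code):
 * when the queue has already hit its hard limit, drop before doing
 * any per-packet work such as hashing.  All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	unsigned int qlen;      /* current number of queued packets */
	unsigned int limit;     /* hard limit, like q->limit in sch_sfb.c */
	unsigned int queuedrop; /* drops due to the full queue */
};

/* stand-in for the per-packet hash / bucket scan the patch skips */
static unsigned int expensive_classify(const char *pkt)
{
	unsigned int h = 0;

	while (*pkt)
		h = h * 31 + (unsigned char)*pkt++;
	return h;
}

static bool toy_enqueue(struct toy_queue *q, const char *pkt)
{
	/* early drop: same shape as the new check at the top of sfb_enqueue() */
	if (q->qlen >= q->limit) {
		q->queuedrop++;
		return false;
	}

	(void)expensive_classify(pkt);  /* only reached when there is room */
	q->qlen++;
	return true;
}

int main(void)
{
	struct toy_queue q = { .qlen = 0, .limit = 2, .queuedrop = 0 };

	toy_enqueue(&q, "a");
	toy_enqueue(&q, "b");
	toy_enqueue(&q, "c");   /* dropped without being hashed */
	printf("qlen=%u queuedrop=%u\n", q.qlen, q.queuedrop);
	return 0;
}

Before this patch, sfb_enqueue() only tested sch->q.qlen >= q->limit after hashing the packet and scanning the buckets, so packets that were going to be dropped anyway still paid that cost; the patch moves the queue-limit check to the top of the function and leaves the later check to handle only the bucket case (minqlen >= q->max).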
-rw-r--r-- | net/sched/sch_sfb.c | 13 |
1 file changed, 8 insertions, 5 deletions
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0c1f6..e83c272c032 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	u32 r, slot, salt, sfbhash;
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
+	if (unlikely(sch->q.qlen >= q->limit)) {
+		sch->qstats.overlimits++;
+		q->stats.queuedrop++;
+		goto drop;
+	}
+
 	if (q->rehash_interval > 0) {
 		unsigned long limit = q->rehash_time + q->rehash_interval;
 
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	slot ^= 1;
 	sfb_skb_cb(skb)->hashes[slot] = 0;
 
-	if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+	if (unlikely(minqlen >= q->max)) {
 		sch->qstats.overlimits++;
-		if (minqlen >= q->max)
-			q->stats.bucketdrop++;
-		else
-			q->stats.queuedrop++;
+		q->stats.bucketdrop++;
 		goto drop;
 	}