author    Thomas Graf <tgraf@suug.ch>        2005-11-05 21:14:28 +0100
committer Thomas Graf <tgr@axs.localdomain>  2005-11-05 22:02:29 +0100
commit    bdc450a0bb1d48144ced1f899cc8366ec8e85024 (patch)
tree      77924b88ae2f9ddc702288e439756800a02988ab
parent    b38c7eef7e536d12051cc3d5864032f2f907cdfe (diff)
[PKT_SCHED]: (G)RED: Introduce hard dropping
Introduces a new flag TC_RED_HARDDROP which specifies that, even if ECN marking is enabled, packets should still be dropped once the average queue length exceeds the maximum threshold.

This _may_ help to avoid global synchronisation during small bursts from peers which advertise ECN but do not actually honour it. Use this option very carefully: it does more harm than good if (qth_max - qth_min) does not cover at least two average burst cycles.

The difference to the current behaviour, in which we would run into the hard queue limit, is that, due to the low-pass filter of RED, short bursts are less likely to cause global synchronisation.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
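For illustration, here is a minimal standalone sketch (not part of the patch; everything outside the TC_RED_* flags and the decision expression is a hypothetical userspace harness) of the drop-versus-mark decision this change installs at RED_HARD_MARK:

#include <stdio.h>

#define TC_RED_ECN      1
#define TC_RED_HARDDROP 2

/* Mirrors the patched condition: drop when hard dropping is requested,
 * when ECN marking is disabled, or when the packet cannot be CE-marked.
 * ecn_mark_ok stands in for INET_ECN_set_ce() succeeding. */
static int red_hard_mark_drops(unsigned char flags, int ecn_mark_ok)
{
	return (flags & TC_RED_HARDDROP) ||
	       !(flags & TC_RED_ECN) ||
	       !ecn_mark_ok;
}

int main(void)
{
	/* ECN alone: an ECN-capable packet is marked, not dropped. */
	printf("ECN, markable: drop=%d\n",
	       red_hard_mark_drops(TC_RED_ECN, 1));
	/* ECN + HARDDROP: dropped even though it could be marked. */
	printf("ECN|HARDDROP, markable: drop=%d\n",
	       red_hard_mark_drops(TC_RED_ECN | TC_RED_HARDDROP, 1));
	return 0;
}

With TC_RED_HARDDROP set, the qdisc falls back to dropping above qth_max even for ECN-capable flows, which is what counters peers that signal ECN capability but ignore CE marks.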
 include/linux/pkt_sched.h |    2 ++
 net/sched/sch_gred.c      |    8 +++++++-
 net/sched/sch_red.c       |    8 +++++++-
 3 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 0ebe320223e..e87b233615b 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -93,6 +93,7 @@ struct tc_fifo_qopt
 /* PRIO section */
 
 #define TCQ_PRIO_BANDS	16
+#define TCQ_MIN_PRIO_BANDS 2
 
 struct tc_prio_qopt
 {
@@ -169,6 +170,7 @@ struct tc_red_qopt
 	unsigned char	Scell_log;	/* cell size for idle damping */
 	unsigned char	flags;
 #define TC_RED_ECN	1
+#define TC_RED_HARDDROP	2
 };
 
 struct tc_red_xstats
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 079b0a4ea1c..29a2dd9f302 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -146,6 +146,11 @@ static inline int gred_use_ecn(struct gred_sched *t)
 	return t->red_flags & TC_RED_ECN;
 }
 
+static inline int gred_use_harddrop(struct gred_sched *t)
+{
+	return t->red_flags & TC_RED_HARDDROP;
+}
+
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched_data *q=NULL;
@@ -214,7 +219,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 		case RED_HARD_MARK:
 			sch->qstats.overlimits++;
-			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+			if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+			    !INET_ECN_set_ce(skb)) {
 				q->stats.forced_drop++;
 				goto congestion_drop;
 			}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0d89dee751a..dccfa44c2d7 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -51,6 +51,11 @@ static inline int red_use_ecn(struct red_sched_data *q)
 	return q->flags & TC_RED_ECN;
 }
 
+static inline int red_use_harddrop(struct red_sched_data *q)
+{
+	return q->flags & TC_RED_HARDDROP;
+}
+
 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -76,7 +81,8 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 		case RED_HARD_MARK:
 			sch->qstats.overlimits++;
-			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+			if (red_use_harddrop(q) || !red_use_ecn(q) ||
+			    !INET_ECN_set_ce(skb)) {
 				q->stats.forced_drop++;
 				goto congestion_drop;
 			}