Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/Kconfig      398
-rw-r--r--   net/sched/em_meta.c      6
-rw-r--r--   net/sched/sch_gred.c   841
-rw-r--r--   net/sched/sch_netem.c  122
-rw-r--r--   net/sched/sch_red.c    418
5 files changed, 828 insertions, 957 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 45d3bc0812c..7f34e7fd767 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -2,13 +2,15 @@
# Traffic control configuration.
#
-menuconfig NET_SCHED
+menu "QoS and/or fair queueing"
+
+config NET_SCHED
bool "QoS and/or fair queueing"
---help---
When the kernel has several packets to send out over a network
device, it has to decide which ones to send first, which ones to
- delay, and which ones to drop. This is the job of the packet
- scheduler, and several different algorithms for how to do this
+ delay, and which ones to drop. This is the job of the queueing
+ disciplines, several different algorithms for how to do this
"fairly" have been proposed.
If you say N here, you will get the standard packet scheduler, which
@@ -23,13 +25,13 @@ menuconfig NET_SCHED
To administer these schedulers, you'll need the user-level utilities
from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
That package also contains some documentation; for more, check out
- <http://snafu.freedom.org/linux2.2/iproute-notes.html>.
+ <http://linux-net.osdl.org/index.php/Iproute2>.
This Quality of Service (QoS) support will enable you to use
Differentiated Services (diffserv) and Resource Reservation Protocol
- (RSVP) on your Linux router if you also say Y to "QoS support",
- "Packet classifier API" and to some classifiers below. Documentation
- and software is at <http://diffserv.sourceforge.net/>.
+ (RSVP) on your Linux router if you also say Y to the corresponding
+ classifiers below. Documentation and software is at
+ <http://diffserv.sourceforge.net/>.
If you say Y here and to "/proc file system" below, you will be able
to read status information about packet schedulers from the file
@@ -42,7 +44,7 @@ choice
prompt "Packet scheduler clock source"
depends on NET_SCHED
default NET_SCH_CLK_JIFFIES
- help
+ ---help---
Packet schedulers need a monotonic clock that increments at a static
rate. The kernel provides several suitable interfaces, each with
different properties:
@@ -56,7 +58,7 @@ choice
config NET_SCH_CLK_JIFFIES
bool "Timer interrupt"
- help
+ ---help---
Say Y here if you want to use the timer interrupt (jiffies) as clock
source. This clock source is fast, synchronized on all processors and
handles cpu clock frequency changes, but its resolution is too low
@@ -64,7 +66,7 @@ config NET_SCH_CLK_JIFFIES
config NET_SCH_CLK_GETTIMEOFDAY
bool "gettimeofday"
- help
+ ---help---
Say Y here if you want to use gettimeofday as clock source. This clock
source has high resolution, is synchronized on all processors and
handles cpu clock frequency changes, but it is slow.
@@ -72,10 +74,12 @@ config NET_SCH_CLK_GETTIMEOFDAY
Choose this if you need a high resolution clock source but can't use
the CPU's cycle counter.
+# don't allow on SMP x86 because they can have unsynchronized TSCs.
+# gettimeofday is a good alternative
config NET_SCH_CLK_CPU
bool "CPU cycle counter"
- depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64
- help
+ depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64
+ ---help---
Say Y here if you want to use the CPU's cycle counter as clock source.
This is a cheap and high resolution clock source, but on some
architectures it is not synchronized on all processors and doesn't
@@ -93,134 +97,129 @@ config NET_SCH_CLK_CPU
endchoice
+comment "Queueing/Scheduling"
+ depends on NET_SCHED
+
config NET_SCH_CBQ
- tristate "CBQ packet scheduler"
+ tristate "Class Based Queueing (CBQ)"
depends on NET_SCHED
---help---
Say Y here if you want to use the Class-Based Queueing (CBQ) packet
- scheduling algorithm for some of your network devices. This
- algorithm classifies the waiting packets into a tree-like hierarchy
- of classes; the leaves of this tree are in turn scheduled by
- separate algorithms (called "disciplines" in this context).
+ scheduling algorithm. This algorithm classifies the waiting packets
+ into a tree-like hierarchy of classes; the leaves of this tree are
+ in turn scheduled by separate algorithms.
- See the top of <file:net/sched/sch_cbq.c> for references about the
- CBQ algorithm.
+ See the top of <file:net/sched/sch_cbq.c> for more details.
CBQ is a commonly used scheduler, so if you're unsure, you should
say Y here. Then say Y to all the queueing algorithms below that you
- want to use as CBQ disciplines. Then say Y to "Packet classifier
- API" and say Y to all the classifiers you want to use; a classifier
- is a routine that allows you to sort your outgoing traffic into
- classes based on a certain criterion.
+ want to use as leaf disciplines.
To compile this code as a module, choose M here: the
module will be called sch_cbq.
config NET_SCH_HTB
- tristate "HTB packet scheduler"
+ tristate "Hierarchical Token Bucket (HTB)"
depends on NET_SCHED
---help---
Say Y here if you want to use the Hierarchical Token Buckets (HTB)
- packet scheduling algorithm for some of your network devices. See
+ packet scheduling algorithm. See
<http://luxik.cdi.cz/~devik/qos/htb/> for complete manual and
in-depth articles.
- HTB is very similar to the CBQ regarding its goals however is has
+ HTB is very similar to CBQ regarding its goals, however it has
different properties and different algorithm.
To compile this code as a module, choose M here: the
module will be called sch_htb.
config NET_SCH_HFSC
- tristate "HFSC packet scheduler"
+ tristate "Hierarchical Fair Service Curve (HFSC)"
depends on NET_SCHED
---help---
Say Y here if you want to use the Hierarchical Fair Service Curve
- (HFSC) packet scheduling algorithm for some of your network devices.
+ (HFSC) packet scheduling algorithm.
To compile this code as a module, choose M here: the
module will be called sch_hfsc.
-#tristate ' H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
config NET_SCH_ATM
- tristate "ATM pseudo-scheduler"
+ tristate "ATM Virtual Circuits (ATM)"
depends on NET_SCHED && ATM
---help---
Say Y here if you want to use the ATM pseudo-scheduler. This
- provides a framework for invoking classifiers (aka "filters"), which
- in turn select classes of this queuing discipline. Each class maps
- the flow(s) it is handling to a given virtual circuit (see the top of
- <file:net/sched/sch_atm.c>).
+ provides a framework for invoking classifiers, which in turn
+ select classes of this queuing discipline. Each class maps
+ the flow(s) it is handling to a given virtual circuit.
+
+ See the top of <file:net/sched/sch_atm.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_atm.
config NET_SCH_PRIO
- tristate "The simplest PRIO pseudoscheduler"
+ tristate "Multi Band Priority Queueing (PRIO)"
depends on NET_SCHED
- help
+ ---help---
Say Y here if you want to use an n-band priority queue packet
- "scheduler" for some of your network devices or as a leaf discipline
- for the CBQ scheduling algorithm. If unsure, say Y.
+ scheduler.
To compile this code as a module, choose M here: the
module will be called sch_prio.
config NET_SCH_RED
- tristate "RED queue"
+ tristate "Random Early Detection (RED)"
depends on NET_SCHED
- help
+ ---help---
Say Y here if you want to use the Random Early Detection (RED)
- packet scheduling algorithm for some of your network devices (see
- the top of <file:net/sched/sch_red.c> for details and references
- about the algorithm).
+ packet scheduling algorithm.
+
+ See the top of <file:net/sched/sch_red.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_red.
config NET_SCH_SFQ
- tristate "SFQ queue"
+ tristate "Stochastic Fairness Queueing (SFQ)"
depends on NET_SCHED
---help---
Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
- packet scheduling algorithm for some of your network devices or as a
- leaf discipline for the CBQ scheduling algorithm (see the top of
- <file:net/sched/sch_sfq.c> for details and references about the SFQ
- algorithm).
+ packet scheduling algorithm.
+
+ See the top of <file:net/sched/sch_sfq.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_sfq.
config NET_SCH_TEQL
- tristate "TEQL queue"
+ tristate "True Link Equalizer (TEQL)"
depends on NET_SCHED
---help---
Say Y here if you want to use the True Link Equalizer (TLE) packet
- scheduling algorithm for some of your network devices or as a leaf
- discipline for the CBQ scheduling algorithm. This queueing
- discipline allows the combination of several physical devices into
- one virtual device. (see the top of <file:net/sched/sch_teql.c> for
- details).
+ scheduling algorithm. This queueing discipline allows the combination
+ of several physical devices into one virtual device.
+
+ See the top of <file:net/sched/sch_teql.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_teql.
config NET_SCH_TBF
- tristate "TBF queue"
+ tristate "Token Bucket Filter (TBF)"
depends on NET_SCHED
- help
- Say Y here if you want to use the Simple Token Bucket Filter (TBF)
- packet scheduling algorithm for some of your network devices or as a
- leaf discipline for the CBQ scheduling algorithm (see the top of
- <file:net/sched/sch_tbf.c> for a description of the TBF algorithm).
+ ---help---
+ Say Y here if you want to use the Token Bucket Filter (TBF) packet
+ scheduling algorithm.
+
+ See the top of <file:net/sched/sch_tbf.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_tbf.
config NET_SCH_GRED
- tristate "GRED queue"
+ tristate "Generic Random Early Detection (GRED)"
depends on NET_SCHED
- help
+ ---help---
Say Y here if you want to use the Generic Random Early Detection
(GRED) packet scheduling algorithm for some of your network devices
(see the top of <file:net/sched/sch_red.c> for details and
@@ -230,9 +229,9 @@ config NET_SCH_GRED
module will be called sch_gred.
config NET_SCH_DSMARK
- tristate "Diffserv field marker"
+ tristate "Differentiated Services marker (DSMARK)"
depends on NET_SCHED
- help
+ ---help---
Say Y if you want to schedule packets according to the
Differentiated Services architecture proposed in RFC 2475.
Technical information on this method, with pointers to associated
@@ -242,9 +241,9 @@ config NET_SCH_DSMARK
module will be called sch_dsmark.
config NET_SCH_NETEM
- tristate "Network emulator"
+ tristate "Network emulator (NETEM)"
depends on NET_SCHED
- help
+ ---help---
Say Y if you want to emulate network delay, loss, and packet
re-ordering. This is often useful to simulate networks when
testing applications or protocols.
@@ -257,58 +256,23 @@ config NET_SCH_NETEM
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_SCHED
- help
- If you say Y here, you will be able to police incoming bandwidth
- and drop packets when this bandwidth exceeds your desired rate.
+ ---help---
+ Say Y here if you want to use classifiers for incoming packets.
If unsure, say Y.
To compile this code as a module, choose M here: the
module will be called sch_ingress.
-config NET_QOS
- bool "QoS support"
+comment "Classification"
depends on NET_SCHED
- ---help---
- Say Y here if you want to include Quality Of Service scheduling
- features, which means that you will be able to request certain
- rate-of-flow limits for your network devices.
-
- This Quality of Service (QoS) support will enable you to use
- Differentiated Services (diffserv) and Resource Reservation Protocol
- (RSVP) on your Linux router if you also say Y to "Packet classifier
- API" and to some classifiers below. Documentation and software is at
- <http://diffserv.sourceforge.net/>.
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about QoS support.
-
-config NET_ESTIMATOR
- bool "Rate estimator"
- depends on NET_QOS
- help
- In order for Quality of Service scheduling to work, the current
- rate-of-flow for a network device has to be estimated; if you say Y
- here, the kernel will do just that.
config NET_CLS
- bool "Packet classifier API"
- depends on NET_SCHED
- ---help---
- The CBQ scheduling algorithm requires that network packets which are
- scheduled to be sent out over a network device be classified
- according to some criterion. If you say Y here, you will get a
- choice of several different packet classifiers with the following
- questions.
-
- This will enable you to use Differentiated Services (diffserv) and
- Resource Reservation Protocol (RSVP) on your Linux router.
- Documentation and software is at
- <http://diffserv.sourceforge.net/>.
+ boolean
config NET_CLS_BASIC
- tristate "Basic classifier"
- depends on NET_CLS
+ tristate "Elementary classification (BASIC)"
+ depends NET_SCHED
+ select NET_CLS
---help---
Say Y here if you want to be able to classify packets using
only extended matches and actions.
@@ -317,24 +281,25 @@ config NET_CLS_BASIC
module will be called cls_basic.
config NET_CLS_TCINDEX
- tristate "TC index classifier"
- depends on NET_CLS
- help
- If you say Y here, you will be able to classify outgoing packets
- according to the tc_index field of the skb. You will want this
- feature if you want to implement Differentiated Services using
- sch_dsmark. If unsure, say Y.
+ tristate "Traffic-Control Index (TCINDEX)"
+ depends NET_SCHED
+ select NET_CLS
+ ---help---
+ Say Y here if you want to be able to classify packets based on
+ traffic control indices. You will want this feature if you want
+ to implement Differentiated Services together with DSMARK.
To compile this code as a module, choose M here: the
module will be called cls_tcindex.
config NET_CLS_ROUTE4
- tristate "Routing table based classifier"
- depends on NET_CLS
+ tristate "Routing decision (ROUTE)"
+ depends NET_SCHED
select NET_CLS_ROUTE
- help
- If you say Y here, you will be able to classify outgoing packets
- according to the route table entry they matched. If unsure, say Y.
+ select NET_CLS
+ ---help---
+ If you say Y here, you will be able to classify packets
+ according to the route table entry they matched.
To compile this code as a module, choose M here: the
module will be called cls_route.
@@ -344,58 +309,45 @@ config NET_CLS_ROUTE
default n
config NET_CLS_FW
- tristate "Firewall based classifier"
- depends on NET_CLS
- help
- If you say Y here, you will be able to classify outgoing packets
- according to firewall criteria you specified.
+ tristate "Netfilter mark (FW)"
+ depends NET_SCHED
+ select NET_CLS
+ ---help---
+ If you say Y here, you will be able to classify packets
+ according to netfilter/firewall marks.
To compile this code as a module, choose M here: the
module will be called cls_fw.
config NET_CLS_U32
- tristate "U32 classifier"
- depends on NET_CLS
- help
- If you say Y here, you will be able to classify outgoing packets
- according to their destination address. If unsure, say Y.
+ tristate "Universal 32bit comparisons w/ hashing (U32)"
+ depends NET_SCHED
+ select NET_CLS
+ ---help---
+ Say Y here to be able to classify packets using a universal
+ 32bit pieces based comparison scheme.
To compile this code as a module, choose M here: the
module will be called cls_u32.
config CLS_U32_PERF
- bool "U32 classifier performance counters"
+ bool "Performance counters support"
depends on NET_CLS_U32
- help
- gathers stats that could be used to tune u32 classifier performance.
- Requires a new iproute2
- You MUST NOT turn this on if you dont have an update iproute2.
-
-config NET_CLS_IND
- bool "classify input device (slows things u32/fw) "
- depends on NET_CLS_U32 || NET_CLS_FW
- help
- This option will be killed eventually when a
- metadata action appears because it slows things a little
- Available only for u32 and fw classifiers.
- Requires a new iproute2
- You MUST NOT turn this on if you dont have an update iproute2.
+ ---help---
+ Say Y here to make u32 gather additional statistics useful for
+ fine tuning u32 classifiers.
config CLS_U32_MARK
- bool "Use nfmark as a key in U32 classifier"
+ bool "Netfilter marks support"
depends on NET_CLS_U32 && NETFILTER
- help
- This allows you to match mark in a u32 filter.
- Example:
- tc filter add dev eth0 protocol ip parent 1:0 prio 5 u32 \
- match mark 0x0090 0xffff \
- match ip dst 4.4.4.4 \
- flowid 1:90
- You must use a new iproute2 to use this feature.
+ ---help---
+ Say Y here to be able to use netfilter marks as u32 key.
config NET_CLS_RSVP
- tristate "Special RSVP classifier"
- depends on NET_CLS && NET_QOS
+ tristate "IPv4 Resource Reservation Protocol (RSVP)"
+ depends on NET_SCHED
+ select NET_CLS
+ select NET_ESTIMATOR
---help---
The Resource Reservation Protocol (RSVP) permits end systems to
request a minimum and maximum data flow rate for a connection; this
@@ -408,31 +360,33 @@ config NET_CLS_RSVP
module will be called cls_rsvp.
config NET_CLS_RSVP6
- tristate "Special RSVP classifier for IPv6"
- depends on NET_CLS && NET_QOS
+ tristate "IPv6 Resource Reservation Protocol (RSVP6)"
+ depends on NET_SCHED
+ select NET_CLS
+ select NET_ESTIMATOR
---help---
The Resource Reservation Protocol (RSVP) permits end systems to
request a minimum and maximum data flow rate for a connection; this
is important for real time data such as streaming sound or video.
Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests and you are using the new Internet Protocol
- IPv6 as opposed to the older and more common IPv4.
+ on their RSVP requests and you are using IPv6.
To compile this code as a module, choose M here: the
module will be called cls_rsvp6.
config NET_EMATCH
bool "Extended Matches"
- depends on NET_CLS
+ depends NET_SCHED
+ select NET_CLS
---help---
Say Y here if you want to use extended matches on top of classifiers
and select the extended matches below.
Extended matches are small classification helpers not worth writing
- a separate classifier.
+ a separate classifier for.
- You must have a recent version of the iproute2 tools in order to use
+ A recent version of the iproute2 package is required to use
extended matches.
config NET_EMATCH_STACK
@@ -466,7 +420,7 @@ config NET_EMATCH_NBYTE
module will be called em_nbyte.
config NET_EMATCH_U32
- tristate "U32 hashing key"
+ tristate "U32 key"
depends on NET_EMATCH
---help---
Say Y here if you want to be able to classify packets using
@@ -494,76 +448,120 @@ config NET_EMATCH_TEXT
select TEXTSEARCH_BM
select TEXTSEARCH_FSM
---help---
- Say Y here if you want to be ablt to classify packets based on
+ Say Y here if you want to be able to classify packets based on
textsearch comparisons.
To compile this code as a module, choose M here: the
module will be called em_text.
config NET_CLS_ACT
- bool "Packet ACTION"
- depends on EXPERIMENTAL && NET_CLS && NET_QOS
+ bool "Actions"
+ depends on EXPERIMENTAL && NET_SCHED
+ select NET_ESTIMATOR
---help---
- This option requires you have a new iproute2. It enables
- tc extensions which can be used with tc classifiers.
- You MUST NOT turn this on if you dont have an update iproute2.
+ Say Y here if you want to use traffic control actions. Actions
+ get attached to classifiers and are invoked after a successful
+ classification. They are used to overwrite the classification
+ result, instantly drop or redirect packets, etc.
+
+ A recent version of the iproute2 package is required to use
+ actions.
config NET_ACT_POLICE
- tristate "Policing Actions"
+ tristate "Traffic Policing"
depends on NET_CLS_ACT
---help---
- If you are using a newer iproute2 select this one, otherwise use one
- below to select a policer.
- You MUST NOT turn this on if you dont have an update iproute2.
+ Say Y here if you want to do traffic policing, i.e. strict
+ bandwidth limiting. This action replaces the existing policing
+ module.
+
+ To compile this code as a module, choose M here: the
+ module will be called police.
config NET_ACT_GACT
- tristate "generic Actions"
+ tristate "Generic actions"
depends on NET_CLS_ACT
---help---
- You must have new iproute2 to use this feature.
- This adds simple filtering actions like drop, accept etc.
+ Say Y here to take generic actions such as dropping and
+ accepting packets.
+
+ To compile this code as a module, choose M here: the
+ module will be called gact.
config GACT_PROB
- bool "generic Actions probability"
+ bool "Probability support"
depends on NET_ACT_GACT
---help---
- Allows generic actions to be randomly or deterministically used.
+ Say Y here to use the generic action randomly or deterministically.
config NET_ACT_MIRRED
- tristate "Packet In/Egress redirecton/mirror Actions"
+ tristate "Redirecting and Mirroring"
depends on NET_CLS_ACT
---help---
- requires new iproute2
- This allows packets to be mirrored or redirected to netdevices
+ Say Y here to allow packets to be mirrored or redirected to
+ other devices.
+
+ To compile this code as a module, choose M here: the
+ module will be called mirred.
config NET_ACT_IPT
- tristate "iptables Actions"
+ tristate "IPtables targets"
depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
---help---
- requires new iproute2
- This allows iptables targets to be used by tc filters
+ Say Y here to be able to invoke iptables targets after successful
+ classification.
+
+ To compile this code as a module, choose M here: the
+ module will be called ipt.
config NET_ACT_PEDIT
- tristate "Generic Packet Editor Actions"
+ tristate "Packet Editing"
depends on NET_CLS_ACT
---help---
- requires new iproute2
- This allows for packets to be generically edited
+ Say Y here if you want to mangle the content of packets.
-config NET_CLS_POLICE
- bool "Traffic policing (needed for in/egress)"
- depends on NET_CLS && NET_QOS && NET_CLS_ACT!=y
- help
- Say Y to support traffic policing (bandwidth limits). Needed for
- ingress and egress rate limiting.
+ To compile this code as a module, choose M here: the
+ module will be called pedit.
config NET_ACT_SIMP
- tristate "Simple action"
+ tristate "Simple Example (Debug)"
depends on NET_CLS_ACT
---help---
- You must have new iproute2 to use this feature.
- This adds a very simple action for demonstration purposes
- The idea is to give action authors a basic example to look at.
- All this action will do is print on the console the configured
- policy string followed by _ then packet count.
+ Say Y here to add a simple action for demonstration purposes.
+ It is meant as an example and for debugging purposes. It will
+ print a configured policy string followed by the packet count
+ to the console for every packet that passes by.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called simple.
+
+config NET_CLS_POLICE
+ bool "Traffic Policing (obsolete)"
+ depends on NET_SCHED && NET_CLS_ACT!=y
+ select NET_ESTIMATOR
+ ---help---
+ Say Y here if you want to do traffic policing, i.e. strict
+ bandwidth limiting. This option is obsoleted by the traffic
+ policer implemented as an action; it stays here for compatibility
+ reasons.
+
+config NET_CLS_IND
+ bool "Incoming device classification"
+ depends on NET_SCHED && (NET_CLS_U32 || NET_CLS_FW)
+ ---help---
+ Say Y here to extend the u32 and fw classifiers to support
+ classification based on the incoming device. This option is
+ likely to disappear in favour of the metadata ematch.
+
+config NET_ESTIMATOR
+ bool "Rate estimator"
+ depends on NET_SCHED
+ ---help---
+ Say Y here to allow using rate estimators to estimate the current
+ rate-of-flow for network devices, queues, etc. This module is
+ automatically selected if needed but can be selected manually for
+ statistical purposes.
+endmenu
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 00eae5f9a01..cf68a59fdc5 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -393,10 +393,10 @@ META_COLLECTOR(int_sk_route_caps)
dst->value = skb->sk->sk_route_caps;
}
-META_COLLECTOR(int_sk_hashent)
+META_COLLECTOR(int_sk_hash)
{
SKIP_NONLOCAL(skb);
- dst->value = skb->sk->sk_hashent;
+ dst->value = skb->sk->sk_hash;
}
META_COLLECTOR(int_sk_lingertime)
@@ -515,7 +515,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
[META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
[META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps),
- [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent),
+ [META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
[META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
[META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
[META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 25c171c3271..29a2dd9f302 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -15,247 +15,281 @@
* from Ren Liu
* - More error checks
*
- *
- *
- * For all the glorious comments look at Alexey's sch_red.c
+ * For all the glorious comments look at include/net/red.h
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
+#include <net/red.h>
-#if 1 /* control */
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#if 0 /* data */
-#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define D2PRINTK(format,args...)
-#endif
+#define GRED_DEF_PRIO (MAX_DPs / 2)
+#define GRED_VQ_MASK (MAX_DPs - 1)
struct gred_sched_data;
struct gred_sched;
struct gred_sched_data
{
-/* Parameters */
u32 limit; /* HARD maximal queue length */
- u32 qth_min; /* Min average length threshold: A scaled */
- u32 qth_max; /* Max average length threshold: A scaled */
u32 DP; /* the drop pramaters */
- char Wlog; /* log(W) */
- char Plog; /* random number bits */
- u32 Scell_max;
- u32 Rmask;
u32 bytesin; /* bytes seen on virtualQ so far*/
u32 packetsin; /* packets seen on virtualQ so far*/
u32 backlog; /* bytes on the virtualQ */
- u32 forced; /* packets dropped for exceeding limits */
- u32 early; /* packets dropped as a warning */
- u32 other; /* packets dropped by invoking drop() */
- u32 pdrop; /* packets dropped because we exceeded physical queue limits */
- char Scell_log;
- u8 Stab[256];
- u8 prio; /* the prio of this vq */
-
-/* Variables */
- unsigned long qave; /* Average queue length: A scaled */
- int qcount; /* Packets since last random number generation */
- u32 qR; /* Cached random number */
-
- psched_time_t qidlestart; /* Start of idle period */
+ u8 prio; /* the prio of this vq */
+
+ struct red_parms parms;
+ struct red_stats stats;
+};
+
+enum {
+ GRED_WRED_MODE = 1,
+ GRED_RIO_MODE,
};
struct gred_sched
{
struct gred_sched_data *tab[MAX_DPs];
- u32 DPs;
- u32 def;
- u8 initd;
- u8 grio;
- u8 eqp;
+ unsigned long flags;
+ u32 red_flags;
+ u32 DPs;
+ u32 def;
+ struct red_parms wred_set;
};
-static int
-gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static inline int gred_wred_mode(struct gred_sched *table)
{
- psched_time_t now;
- struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
- unsigned long qave=0;
- int i=0;
+ return test_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline void gred_enable_wred_mode(struct gred_sched *table)
+{
+ __set_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline void gred_disable_wred_mode(struct gred_sched *table)
+{
+ __clear_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline int gred_rio_mode(struct gred_sched *table)
+{
+ return test_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline void gred_enable_rio_mode(struct gred_sched *table)
+{
+ __set_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline void gred_disable_rio_mode(struct gred_sched *table)
+{
+ __clear_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline int gred_wred_mode_check(struct Qdisc *sch)
+{
+ struct gred_sched *table = qdisc_priv(sch);
+ int i;
- if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
- D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
- goto do_enqueue;
+ /* Really ugly O(n^2) but shouldn't be necessary too frequent. */
+ for (i = 0; i < table->DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+ int n;
+
+ if (q == NULL)
+ continue;
+
+ for (n = 0; n < table->DPs; n++)
+ if (table->tab[n] && table->tab[n] != q &&
+ table->tab[n]->prio == q->prio)
+ return 1;
}
+ return 0;
+}
+
+static inline unsigned int gred_backlog(struct gred_sched *table,
+ struct gred_sched_data *q,
+ struct Qdisc *sch)
+{
+ if (gred_wred_mode(table))
+ return sch->qstats.backlog;
+ else
+ return q->backlog;
+}
+
+static inline u16 tc_index_to_dp(struct sk_buff *skb)
+{
+ return skb->tc_index & GRED_VQ_MASK;
+}
+
+static inline void gred_load_wred_set(struct gred_sched *table,
+ struct gred_sched_data *q)
+{
+ q->parms.qavg = table->wred_set.qavg;
+ q->parms.qidlestart = table->wred_set.qidlestart;
+}
+
+static inline void gred_store_wred_set(struct gred_sched *table,
+ struct gred_sched_data *q)
+{
+ table->wred_set.qavg = q->parms.qavg;
+}
+
+static inline int gred_use_ecn(struct gred_sched *t)
+{
+ return t->red_flags & TC_RED_ECN;
+}
- if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
- printk("GRED: setting to default (%d)\n ",t->def);
- if (!(q=t->tab[t->def])) {
- DPRINTK("GRED: setting to default FAILED! dropping!! "
- "(%d)\n ", t->def);
- goto drop;
+static inline int gred_use_harddrop(struct gred_sched *t)
+{
+ return t->red_flags & TC_RED_HARDDROP;
+}
+
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct gred_sched_data *q=NULL;
+ struct gred_sched *t= qdisc_priv(sch);
+ unsigned long qavg = 0;
+ u16 dp = tc_index_to_dp(skb);
+
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ dp = t->def;
+
+ if ((q = t->tab[dp]) == NULL) {
+ /* Pass through packets not assigned to a DP
+ * if no default DP has been configured. This
+ * allows for DP flows to be left untouched.
+ */
+ if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+ return qdisc_enqueue_tail(skb, sch);
+ else
+ goto drop;
}
+
/* fix tc_index? --could be controvesial but needed for
requeueing */
- skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
+ skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
}
- D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
- "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
- sch->qstats.backlog);
- /* sum up all the qaves of prios <= to ours to get the new qave*/
- if (!t->eqp && t->grio) {
- for (i=0;i<t->DPs;i++) {
- if ((!t->tab[i]) || (i==q->DP))
- continue;
-
- if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
- qave +=t->tab[i]->qave;
+ /* sum up all the qaves of prios <= to ours to get the new qave */
+ if (!gred_wred_mode(t) && gred_rio_mode(t)) {
+ int i;
+
+ for (i = 0; i < t->DPs; i++) {
+ if (t->tab[i] && t->tab[i]->prio < q->prio &&
+ !red_is_idling(&t->tab[i]->parms))
+ qavg +=t->tab[i]->parms.qavg;
}
-
+
}
q->packetsin++;
- q->bytesin+=skb->len;
+ q->bytesin += skb->len;
- if (t->eqp && t->grio) {
- qave=0;
- q->qave=t->tab[t->def]->qave;
- q->qidlestart=t->tab[t->def]->qidlestart;
- }
+ if (gred_wred_mode(t))
+ gred_load_wred_set(t, q);
- if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
- long us_idle;
- PSCHED_GET_TIME(now);
- us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
- PSCHED_SET_PASTPERFECT(q->qidlestart);
+ q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
- q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
- } else {
- if (t->eqp) {
- q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
- } else {
- q->qave += q->backlog - (q->qave >> q->Wlog);
- }
+ if (red_is_idling(&q->parms))
+ red_end_of_idle_period(&q->parms);
- }
-
-
- if (t->eqp && t->grio)
- t->tab[t->def]->qave=q->qave;
-
- if ((q->qave+qave) < q->qth_min) {
- q->qcount = -1;
-enqueue:
- if (q->backlog + skb->len <= q->limit) {
- q->backlog += skb->len;
-do_enqueue:
- __skb_queue_tail(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->bstats.bytes += skb->len;
- sch->bstats.packets++;
- return 0;
- } else {
- q->pdrop++;
- }
+ if (gred_wred_mode(t))
+ gred_store_wred_set(t, q);
-drop:
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
- if ((q->qave+qave) >= q->qth_max) {
- q->qcount = -1;
- sch->qstats.overlimits++;
- q->forced++;
- goto drop;
+ switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+ q->stats.forced_mark++;
+ break;
}
- if (++q->qcount) {
- if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
- goto enqueue;
- q->qcount = 0;
- q->qR = net_random()&q->Rmask;
- sch->qstats.overlimits++;
- q->early++;
- goto drop;
+
+ if (q->backlog + skb->len <= q->limit) {
+ q->backlog += skb->len;
+ return qdisc_enqueue_tail(skb, sch);
}
- q->qR = net_random()&q->Rmask;
- goto enqueue;
+
+ q->stats.pdrop++;
+drop:
+ return qdisc_drop(skb, sch);
+
+congestion_drop:
+ qdisc_drop(skb, sch);
+ return NET_XMIT_CN;
}
-static int
-gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
+ struct gred_sched *t = qdisc_priv(sch);
struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
- q= t->tab[(skb->tc_index&0xf)];
-/* error checking here -- probably unnecessary */
- PSCHED_SET_PASTPERFECT(q->qidlestart);
-
- __skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->qstats.requeues++;
- q->backlog += skb->len;
- return 0;
+ u16 dp = tc_index_to_dp(skb);
+
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
+ "for requeue, screwing up backlog.\n",
+ tc_index_to_dp(skb));
+ } else {
+ if (red_is_idling(&q->parms))
+ red_end_of_idle_period(&q->parms);
+ q->backlog += skb->len;
+ }
+
+ return qdisc_requeue(skb, sch);
}
-static struct sk_buff *
-gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t = qdisc_priv(sch);
+
+ skb = qdisc_dequeue_head(sch);
- skb = __skb_dequeue(&sch->q);
if (skb) {
- sch->qstats.backlog -= skb->len;
- q= t->tab[(skb->tc_index&0xf)];
- if (q) {
- q->backlog -= skb->len;
- if (!q->backlog && !t->eqp)
- PSCHED_GET_TIME(q->qidlestart);
+ struct gred_sched_data *q;
+ u16 dp = tc_index_to_dp(skb);
+
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GRED: Unable to relocate "
+ "VQ 0x%x after dequeue, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
- D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
+ q->backlog -= skb->len;
+
+ if (!q->backlog && !gred_wred_mode(t))
+ red_start_of_idle_period(&q->parms);
}
+
return skb;
}
- if (t->eqp) {
- q= t->tab[t->def];
- if (!q)
- D2PRINTK("no default VQ set: Results will be "
- "screwed up\n");
- else
- PSCHED_GET_TIME(q->qidlestart);
- }
+ if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
+ red_start_of_idle_period(&t->wred_set);
return NULL;
}
@@ -263,36 +297,34 @@ gred_dequeue(struct Qdisc* sch)
static unsigned int gred_drop(struct Qdisc* sch)
{
struct sk_buff *skb;
+ struct gred_sched *t = qdisc_priv(sch);
- struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
-
- skb = __skb_dequeue_tail(&sch->q);
+ skb = qdisc_dequeue_tail(sch);
if (skb) {
unsigned int len = skb->len;
- sch->qstats.backlog -= len;
- sch->qstats.drops++;
- q= t->tab[(skb->tc_index&0xf)];
- if (q) {
- q->backlog -= len;
- q->other++;
- if (!q->backlog && !t->eqp)
- PSCHED_GET_TIME(q->qidlestart);
+ struct gred_sched_data *q;
+ u16 dp = tc_index_to_dp(skb);
+
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GRED: Unable to relocate "
+ "VQ 0x%x while dropping, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
- D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
+ q->backlog -= len;
+ q->stats.other++;
+
+ if (!q->backlog && !gred_wred_mode(t))
+ red_start_of_idle_period(&q->parms);
}
- kfree_skb(skb);
+ qdisc_drop(skb, sch);
return len;
}
- q=t->tab[t->def];
- if (!q) {
- D2PRINTK("no default VQ set: Results might be screwed up\n");
- return 0;
- }
+ if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
+ red_start_of_idle_period(&t->wred_set);
- PSCHED_GET_TIME(q->qidlestart);
return 0;
}
@@ -300,293 +332,241 @@ static unsigned int gred_drop(struct Qdisc* sch)
static void gred_reset(struct Qdisc* sch)
{
int i;
- struct gred_sched_data *q;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched *t = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
- __skb_queue_purge(&sch->q);
+ for (i = 0; i < t->DPs; i++) {
+ struct gred_sched_data *q = t->tab[i];
- sch->qstats.backlog = 0;
+ if (!q)
+ continue;
- for (i=0;i<t->DPs;i++) {
- q= t->tab[i];
- if (!q)
- continue;
- PSCHED_SET_PASTPERFECT(q->qidlestart);
- q->qave = 0;
- q->qcount = -1;
+ red_restart(&q->parms);
q->backlog = 0;
- q->other=0;
- q->forced=0;
- q->pdrop=0;
- q->early=0;
}
}
-static int gred_change(struct Qdisc *sch, struct rtattr *opt)
+static inline void gred_destroy_vq(struct gred_sched_data *q)
+{
+ kfree(q);
+}
+
+static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
struct gred_sched *table = qdisc_priv(sch);
- struct gred_sched_data *q;
- struct tc_gred_qopt *ctl;
struct tc_gred_sopt *sopt;
- struct rtattr *tb[TCA_GRED_STAB];
- struct rtattr *tb2[TCA_GRED_DPS];
int i;
- if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
+ if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
return -EINVAL;
- if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
- rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);
+ sopt = RTA_DATA(dps);
+
+ if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+ return -EINVAL;
- if (tb2[TCA_GRED_DPS-1] == 0)
- return -EINVAL;
+ sch_tree_lock(sch);
+ table->DPs = sopt->DPs;
+ table->def = sopt->def_DP;
+ table->red_flags = sopt->flags;
+
+ /*
+ * Every entry point to GRED is synchronized with the above code
+ * and the DP is checked against DPs, i.e. shadowed VQs can no
+ * longer be found so we can unlock right here.
+ */
+ sch_tree_unlock(sch);
+
+ if (sopt->grio) {
+ gred_enable_rio_mode(table);
+ gred_disable_wred_mode(table);
+ if (gred_wred_mode_check(sch))
+ gred_enable_wred_mode(table);
+ } else {
+ gred_disable_rio_mode(table);
+ gred_disable_wred_mode(table);
+ }
- sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
- table->DPs=sopt->DPs;
- table->def=sopt->def_DP;
- table->grio=sopt->grio;
- table->initd=0;
- /* probably need to clear all the table DP entries as well */
- return 0;
- }
+ for (i = table->DPs; i < MAX_DPs; i++) {
+ if (table->tab[i]) {
+ printk(KERN_WARNING "GRED: Warning: Destroying "
+ "shadowed VQ 0x%x\n", i);
+ gred_destroy_vq(table->tab[i]);
+ table->tab[i] = NULL;
+ }
+ }
+ return 0;
+}
- if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
- RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
- RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
- return -EINVAL;
+static inline int gred_change_vq(struct Qdisc *sch, int dp,
+ struct tc_gred_qopt *ctl, int prio, u8 *stab)
+{
+ struct gred_sched *table = qdisc_priv(sch);
+ struct gred_sched_data *q;
- ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
- if (ctl->DP > MAX_DPs-1 ) {
- /* misbehaving is punished! Put in the default drop probability */
- DPRINTK("\nGRED: DP %u not in the proper range fixed. New DP "
- "set to default at %d\n",ctl->DP,table->def);
- ctl->DP=table->def;
- }
-
- if (table->tab[ctl->DP] == NULL) {
- table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
- GFP_KERNEL);
- if (NULL == table->tab[ctl->DP])
+ if (table->tab[dp] == NULL) {
+ table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
+ if (table->tab[dp] == NULL)
return -ENOMEM;
- memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
- }
- q= table->tab[ctl->DP];
-
- if (table->grio) {
- if (ctl->prio <=0) {
- if (table->def && table->tab[table->def]) {
- DPRINTK("\nGRED: DP %u does not have a prio"
- "setting default to %d\n",ctl->DP,
- table->tab[table->def]->prio);
- q->prio=table->tab[table->def]->prio;
- } else {
- DPRINTK("\nGRED: DP %u does not have a prio"
- " setting default to 8\n",ctl->DP);
- q->prio=8;
- }
- } else {
- q->prio=ctl->prio;
- }
- } else {
- q->prio=8;
+ memset(table->tab[dp], 0, sizeof(*q));
}
-
- q->DP=ctl->DP;
- q->Wlog = ctl->Wlog;
- q->Plog = ctl->Plog;
+ q = table->tab[dp];
+ q->DP = dp;
+ q->prio = prio;
q->limit = ctl->limit;
- q->Scell_log = ctl->Scell_log;
- q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
- q->Scell_max = (255<<q->Scell_log);
- q->qth_min = ctl->qth_min<<ctl->Wlog;
- q->qth_max = ctl->qth_max<<ctl->Wlog;
- q->qave=0;
- q->backlog=0;
- q->qcount = -1;
- q->other=0;
- q->forced=0;
- q->pdrop=0;
- q->early=0;
-
- PSCHED_SET_PASTPERFECT(q->qidlestart);
- memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
-
- if ( table->initd && table->grio) {
- /* this looks ugly but it's not in the fast path */
- for (i=0;i<table->DPs;i++) {
- if ((!table->tab[i]) || (i==q->DP) )
- continue;
- if (table->tab[i]->prio == q->prio ){
- /* WRED mode detected */
- table->eqp=1;
- break;
- }
- }
- }
- if (!table->initd) {
- table->initd=1;
- /*
- the first entry also goes into the default until
- over-written
- */
-
- if (table->tab[table->def] == NULL) {
- table->tab[table->def]=
- kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
- if (NULL == table->tab[table->def])
- return -ENOMEM;
-
- memset(table->tab[table->def], 0,
- (sizeof(struct gred_sched_data)));
- }
- q= table->tab[table->def];
- q->DP=table->def;
- q->Wlog = ctl->Wlog;
- q->Plog = ctl->Plog;
- q->limit = ctl->limit;
- q->Scell_log = ctl->Scell_log;
- q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
- q->Scell_max = (255<<q->Scell_log);
- q->qth_min = ctl->qth_min<<ctl->Wlog;
- q->qth_max = ctl->qth_max<<ctl->Wlog;
-
- if (table->grio)
- q->prio=table->tab[ctl->DP]->prio;
- else
- q->prio=8;
-
- q->qcount = -1;
- PSCHED_SET_PASTPERFECT(q->qidlestart);
- memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
- }
- return 0;
+ if (q->backlog == 0)
+ red_end_of_idle_period(&q->parms);
+ red_set_parms(&q->parms,
+ ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
+ ctl->Scell_log, stab);
+
+ return 0;
}
-static int gred_init(struct Qdisc *sch, struct rtattr *opt)
+static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
struct gred_sched *table = qdisc_priv(sch);
- struct tc_gred_sopt *sopt;
- struct rtattr *tb[TCA_GRED_STAB];
- struct rtattr *tb2[TCA_GRED_DPS];
+ struct tc_gred_qopt *ctl;
+ struct rtattr *tb[TCA_GRED_MAX];
+ int err = -EINVAL, prio = GRED_DEF_PRIO;
+ u8 *stab;
- if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
return -EINVAL;
- if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
- rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);
+ if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
+ return gred_change_table_def(sch, opt);
+
+ if (tb[TCA_GRED_PARMS-1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
+ tb[TCA_GRED_STAB-1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
+ return -EINVAL;
+
+ ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
+ stab = RTA_DATA(tb[TCA_GRED_STAB-1]);
+
+ if (ctl->DP >= table->DPs)
+ goto errout;
- if (tb2[TCA_GRED_DPS-1] == 0)
- return -EINVAL;
+ if (gred_rio_mode(table)) {
+ if (ctl->prio == 0) {
+ int def_prio = GRED_DEF_PRIO;
- sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
- table->DPs=sopt->DPs;
- table->def=sopt->def_DP;
- table->grio=sopt->grio;
- table->initd=0;
- return 0;
+ if (table->tab[table->def])
+ def_prio = table->tab[table->def]->prio;
+
+ printk(KERN_DEBUG "GRED: DP %u does not have a prio "
+ "setting default to %d\n", ctl->DP, def_prio);
+
+ prio = def_prio;
+ } else
+ prio = ctl->prio;
+ }
+
+ sch_tree_lock(sch);
+
+ err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+ if (err < 0)
+ goto errout_locked;
+
+ if (gred_rio_mode(table)) {
+ gred_disable_wred_mode(table);
+ if (gred_wred_mode_check(sch))
+ gred_enable_wred_mode(table);
}
- DPRINTK("\n GRED_INIT error!\n");
- return -EINVAL;
+ err = 0;
+
+errout_locked:
+ sch_tree_unlock(sch);
+errout:
+ return err;
}
-static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
- unsigned long qave;
- struct rtattr *rta;
- struct tc_gred_qopt *opt = NULL ;
- struct tc_gred_qopt *dst;
- struct gred_sched *table = qdisc_priv(sch);
- struct gred_sched_data *q;
- int i;
- unsigned char *b = skb->tail;
+ struct rtattr *tb[TCA_GRED_MAX];
- rta = (struct rtattr*)b;
- RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
+ return -EINVAL;
- opt=kmalloc(sizeof(struct tc_gred_qopt)*MAX_DPs, GFP_KERNEL);
+ if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
+ return -EINVAL;
- if (opt == NULL) {
- DPRINTK("gred_dump:failed to malloc for %Zd\n",
- sizeof(struct tc_gred_qopt)*MAX_DPs);
- goto rtattr_failure;
- }
+ return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
+}
- memset(opt, 0, (sizeof(struct tc_gred_qopt))*table->DPs);
+static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct gred_sched *table = qdisc_priv(sch);
+ struct rtattr *parms, *opts = NULL;
+ int i;
+ struct tc_gred_sopt sopt = {
+ .DPs = table->DPs,
+ .def_DP = table->def,
+ .grio = gred_rio_mode(table),
+ .flags = table->red_flags,
+ };
- if (!table->initd) {
- DPRINTK("NO GRED Queues setup!\n");
- }
+ opts = RTA_NEST(skb, TCA_OPTIONS);
+ RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+ parms = RTA_NEST(skb, TCA_GRED_PARMS);
+
+ for (i = 0; i < MAX_DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+ struct tc_gred_qopt opt;
- for (i=0;i<MAX_DPs;i++) {
- dst= &opt[i];
- q= table->tab[i];
+ memset(&opt, 0, sizeof(opt));
if (!q) {
/* hack -- fix at some point with proper message
This is how we indicate to tc that there is no VQ
at this DP */
- dst->DP=MAX_DPs+i;
- continue;
+ opt.DP = MAX_DPs + i;
+ goto append_opt;
}
- dst->limit=q->limit;
- dst->qth_min=q->qth_min>>q->Wlog;
- dst->qth_max=q->qth_max>>q->Wlog;
- dst->DP=q->DP;
- dst->backlog=q->backlog;
- if (q->qave) {
- if (table->eqp && table->grio) {
- q->qidlestart=table->tab[table->def]->qidlestart;
- q->qave=table->tab[table->def]->qave;
- }
- if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
- long idle;
- psched_time_t now;
- PSCHED_GET_TIME(now);
- idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
- qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
- dst->qave = qave >> q->Wlog;
-
- } else {
- dst->qave = q->qave >> q->Wlog;
- }
- } else {
- dst->qave = 0;
+ opt.limit = q->limit;
+ opt.DP = q->DP;
+ opt.backlog = q->backlog;
+ opt.prio = q->prio;
+ opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
+ opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
+ opt.Wlog = q->parms.Wlog;
+ opt.Plog = q->parms.Plog;
+ opt.Scell_log = q->parms.Scell_log;
+ opt.other = q->stats.other;
+ opt.early = q->stats.prob_drop;
+ opt.forced = q->stats.forced_drop;
+ opt.pdrop = q->stats.pdrop;
+ opt.packets = q->packetsin;
+ opt.bytesin = q->bytesin;
+
+ if (gred_wred_mode(table)) {
+ q->parms.qidlestart =
+ table->tab[table->def]->parms.qidlestart;
+ q->parms.qavg = table->tab[table->def]->parms.qavg;
}
-
-
- dst->Wlog = q->Wlog;
- dst->Plog = q->Plog;
- dst->Scell_log = q->Scell_log;
- dst->other = q->other;
- dst->forced = q->forced;
- dst->early = q->early;
- dst->pdrop = q->pdrop;
- dst->prio = q->prio;
- dst->packets=q->packetsin;
- dst->bytesin=q->bytesin;
+
+ opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+
+append_opt:
+ RTA_APPEND(skb, sizeof(opt), &opt);
}
- RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
- rta->rta_len = skb->tail - b;
+ RTA_NEST_END(skb, parms);
- kfree(opt);
- return skb->len;
+ return RTA_NEST_END(skb, opts);
rtattr_failure:
- if (opt)
- kfree(opt);
- DPRINTK("gred_dump: FAILURE!!!!\n");
-
-/* also free the opt struct here */
- skb_trim(skb, b - skb->data);
- return -1;
+ return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
@@ -594,15 +574,13 @@ static void gred_destroy(struct Qdisc *sch)
struct gred_sched *table = qdisc_priv(sch);
int i;
- for (i = 0;i < table->DPs; i++) {
+ for (i = 0; i < table->DPs; i++) {
if (table->tab[i])
- kfree(table->tab[i]);
+ gred_destroy_vq(table->tab[i]);
}
}
static struct Qdisc_ops gred_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "gred",
.priv_size = sizeof(struct gred_sched),
.enqueue = gred_enqueue,
@@ -621,10 +599,13 @@ static int __init gred_module_init(void)
{
return register_qdisc(&gred_qdisc_ops);
}
-static void __exit gred_module_exit(void)
+
+static void __exit gred_module_exit(void)
{
unregister_qdisc(&gred_qdisc_ops);
}
+
module_init(gred_module_init)
module_exit(gred_module_exit)
+
MODULE_LICENSE("GPL");
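
The rewritten gred_enqueue() above no longer open-codes "tc_index & 0xf": it derives the virtual queue (DP) from the low bits of skb->tc_index via tc_index_to_dp() and falls back to the default DP when the packet maps to an unconfigured queue. A minimal userspace sketch of that selection, with a simplified table and made-up values standing in for the real skb and qdisc state:

#include <stdio.h>

#define MAX_DPs      16
#define GRED_VQ_MASK (MAX_DPs - 1)

struct vq { int configured; };

static unsigned int tc_index_to_dp(unsigned int tc_index)
{
	return tc_index & GRED_VQ_MASK;
}

int main(void)
{
	struct vq tab[MAX_DPs] = { { 0 } };
	unsigned int def = 0;			/* default DP, hypothetical */
	unsigned int tc_index = 0x23;		/* sample classification result */
	unsigned int dp = tc_index_to_dp(tc_index);

	tab[def].configured = 1;		/* only the default DP exists */

	if (dp >= MAX_DPs || !tab[dp].configured) {
		printf("DP 0x%x not configured, using default DP %u\n", dp, def);
		dp = def;
	}
	printf("packet enqueued on virtual queue %u\n", dp);
	return 0;
}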
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index bb9bf8d5003..cdc8d283791 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -25,6 +25,8 @@
#include <net/pkt_sched.h>
+#define VERSION "1.1"
+
/* Network Emulation Queuing algorithm.
====================================
@@ -185,10 +187,13 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|| q->counter < q->gap /* inside last reordering gap */
|| q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
+ psched_tdiff_t delay;
+
+ delay = tabledist(q->latency, q->jitter,
+ &q->delay_cor, q->delay_dist);
+
PSCHED_GET_TIME(now);
- PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
- &q->delay_cor, q->delay_dist),
- cb->time_to_send);
+ PSCHED_TADD2(now, delay, cb->time_to_send);
++q->counter;
ret = q->qdisc->enqueue(skb, q->qdisc);
} else {
@@ -248,24 +253,31 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
const struct netem_skb_cb *cb
= (const struct netem_skb_cb *)skb->cb;
psched_time_t now;
- long delay;
/* if more time remaining? */
PSCHED_GET_TIME(now);
- delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
- pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
- if (delay <= 0) {
+
+ if (PSCHED_TLESS(cb->time_to_send, now)) {
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
- }
+ } else {
+ psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
+
+ if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+ sch->qstats.drops++;
- mod_timer(&q->timer, jiffies + delay);
- sch->flags |= TCQ_F_THROTTLED;
+ /* After this qlen is confused */
+ printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
+ q->qdisc->ops->id);
- if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
- sch->qstats.drops++;
+ sch->q.qlen--;
+ }
+
+ mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
+ sch->flags |= TCQ_F_THROTTLED;
+ }
}
return NULL;
@@ -290,11 +302,16 @@ static void netem_reset(struct Qdisc *sch)
del_timer_sync(&q->timer);
}
+/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
struct rtattr *rta;
int ret = -ENOMEM;
+ /* Hack to avoid sending change message to non-FIFO */
+ if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+ return 0;
+
rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
if (rta) {
rta->rta_type = RTM_NEWQDISC;
@@ -426,6 +443,84 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
return 0;
}
+/*
+ * Special case version of FIFO queue for use by netem.
+ * It queues in order based on timestamps in skb's
+ */
+struct fifo_sched_data {
+ u32 limit;
+};
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+ struct fifo_sched_data *q = qdisc_priv(sch);
+ struct sk_buff_head *list = &sch->q;
+ const struct netem_skb_cb *ncb
+ = (const struct netem_skb_cb *)nskb->cb;
+ struct sk_buff *skb;
+
+ if (likely(skb_queue_len(list) < q->limit)) {
+ skb_queue_reverse_walk(list, skb) {
+ const struct netem_skb_cb *cb
+ = (const struct netem_skb_cb *)skb->cb;
+
+ if (PSCHED_TLESS(cb->time_to_send, ncb->time_to_send))
+ break;
+ }
+
+ __skb_queue_after(list, skb, nskb);
+
+ sch->qstats.backlog += nskb->len;
+ sch->bstats.bytes += nskb->len;
+ sch->bstats.packets++;
+
+ return NET_XMIT_SUCCESS;
+ }
+
+ return qdisc_drop(nskb, sch);
+}
+
+static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct fifo_sched_data *q = qdisc_priv(sch);
+
+ if (opt) {
+ struct tc_fifo_qopt *ctl = RTA_DATA(opt);
+ if (RTA_PAYLOAD(opt) < sizeof(*ctl))
+ return -EINVAL;
+
+ q->limit = ctl->limit;
+ } else
+ q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+
+ return 0;
+}
+
+static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fifo_sched_data *q = qdisc_priv(sch);
+ struct tc_fifo_qopt opt = { .limit = q->limit };
+
+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ return skb->len;
+
+rtattr_failure:
+ return -1;
+}
+
+static struct Qdisc_ops tfifo_qdisc_ops = {
+ .id = "tfifo",
+ .priv_size = sizeof(struct fifo_sched_data),
+ .enqueue = tfifo_enqueue,
+ .dequeue = qdisc_dequeue_head,
+ .requeue = qdisc_requeue,
+ .drop = qdisc_queue_drop,
+ .init = tfifo_init,
+ .reset = qdisc_reset_queue,
+ .change = tfifo_init,
+ .dump = tfifo_dump,
+};
+
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -438,7 +533,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
q->timer.function = netem_watchdog;
q->timer.data = (unsigned long) sch;
- q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
if (!q->qdisc) {
pr_debug("netem: qdisc create failed\n");
return -ENOMEM;
@@ -601,6 +696,7 @@ static struct Qdisc_ops netem_qdisc_ops = {
static int __init netem_module_init(void)
{
+ pr_info("netem: version " VERSION "\n");
return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
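
The tfifo qdisc added above keeps netem's internal queue ordered by the time_to_send stamp: tfifo_enqueue() walks the list from the tail and inserts the new packet behind the first entry that is scheduled no later, so jittered delays cannot reorder the dequeue side. A standalone sketch of that ordering rule, using a plain array and made-up timestamps in place of sk_buff queues:

#include <stdio.h>

#define QLEN 8

static void insert_sorted(long q[], int *len, long time_to_send)
{
	int i = *len;

	/* reverse walk: stop at the last entry scheduled no later than us */
	while (i > 0 && q[i - 1] > time_to_send) {
		q[i] = q[i - 1];	/* shift later packets back */
		i--;
	}
	q[i] = time_to_send;
	(*len)++;
}

int main(void)
{
	long q[QLEN];
	long samples[] = { 100, 250, 175, 400, 180 };	/* jittered send times */
	int len = 0, i;

	for (i = 0; i < 5; i++)
		insert_sorted(q, &len, samples[i]);

	for (i = 0; i < len; i++)
		printf("%ld ", q[i]);	/* prints 100 175 180 250 400 */
	printf("\n");
	return 0;
}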
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7845d045eec..dccfa44c2d7 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -9,76 +9,23 @@
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Changes:
- * J Hadi Salim <hadi@nortel.com> 980914: computation fixes
+ * J Hadi Salim 980914: computation fixes
* Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
- * J Hadi Salim <hadi@nortelnetworks.com> 980816: ECN support
+ * J Hadi Salim 980816: ECN support
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
-#include <net/dsfield.h>
+#include <net/red.h>
-/* Random Early Detection (RED) algorithm.
- =======================================
-
- Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
- for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.
-
- This file codes a "divisionless" version of RED algorithm
- as written down in Fig.17 of the paper.
-
-Short description.
-------------------
-
- When a new packet arrives we calculate the average queue length:
-
- avg = (1-W)*avg + W*current_queue_len,
-
- W is the filter time constant (chosen as 2^(-Wlog)), it controls
- the inertia of the algorithm. To allow larger bursts, W should be
- decreased.
-
- if (avg > th_max) -> packet marked (dropped).
- if (avg < th_min) -> packet passes.
- if (th_min < avg < th_max) we calculate probability:
-
- Pb = max_P * (avg - th_min)/(th_max-th_min)
-
- and mark (drop) packet with this probability.
- Pb changes from 0 (at avg==th_min) to max_P (avg==th_max).
- max_P should be small (not 1), usually 0.01..0.02 is good value.
-
- max_P is chosen as a number, so that max_P/(th_max-th_min)
- is a negative power of two in order arithmetics to contain
- only shifts.
-
-
- Parameters, settable by user:
+/* Parameters, settable by user:
-----------------------------
limit - bytes (must be > qth_max + burst)
@@ -89,243 +36,93 @@ Short description.
arbitrarily high (well, less than ram size)
Really, this limit will never be reached
if RED works correctly.
-
- qth_min - bytes (should be < qth_max/2)
- qth_max - bytes (should be at least 2*qth_min and less limit)
- Wlog - bits (<32) log(1/W).
- Plog - bits (<32)
-
- Plog is related to max_P by formula:
-
- max_P = (qth_max-qth_min)/2^Plog;
-
- F.e. if qth_max=128K and qth_min=32K, then Plog=22
- corresponds to max_P=0.02
-
- Scell_log
- Stab
-
- Lookup table for log((1-W)^(t/t_ave).
-
-
-NOTES:
-
-Upper bound on W.
------------------
-
- If you want to allow bursts of L packets of size S,
- you should choose W:
-
- L + 1 - th_min/S < (1-(1-W)^L)/W
-
- th_min/S = 32 th_min/S = 4
-
- log(W) L
- -1 33
- -2 35
- -3 39
- -4 46
- -5 57
- -6 75
- -7 101
- -8 135
- -9 190
- etc.
*/
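
The "divisionless" arithmetic the removed comment described survives in net/red.h: the average carries Wlog fractional bits, so avg = (1-W)*avg + W*backlog with W = 2^-Wlog becomes a single shift, and max_P = (qth_max-qth_min)/2^Plog keeps the marking probability free of divisions. A small userspace sketch of that math follows (illustrative only, not the net/red.h API); the parameter values are the Plog=22, qth_min=32K, qth_max=128K example from the old comment.

#include <stdio.h>

/*
 * Userspace sketch of RED's fixed-point arithmetic; not the net/red.h
 * API. qavg carries Wlog fractional bits, so
 *
 *         qavg += backlog - (qavg >> Wlog)
 *
 * is the integer form of  avg = (1 - W)*avg + W*backlog  with W = 2^-Wlog.
 */
static unsigned long red_avg_update(unsigned long qavg, unsigned long backlog,
                                    int Wlog)
{
        return qavg + backlog - (qavg >> Wlog);
}

int main(void)
{
        const int Wlog = 9;                      /* W = 1/512 */
        const int Plog = 22;                     /* see max_P below */
        const unsigned long qth_min = 32 * 1024; /* bytes */
        const unsigned long qth_max = 128 * 1024;
        const unsigned long backlog = 100 * 1024;
        unsigned long qavg = 0;
        int i;

        /* Feed a steady 100 KB backlog until the average converges. */
        for (i = 0; i < 4000; i++)
                qavg = red_avg_update(qavg, backlog, Wlog);

        double avg = (double)qavg / (1 << Wlog); /* back to bytes */
        double max_p = (double)(qth_max - qth_min) / (1UL << Plog);
        double pb = 0.0;

        if (avg >= qth_max)
                pb = 1.0;                        /* hard mark/drop region */
        else if (avg > qth_min)
                pb = max_p * (avg - qth_min) / (qth_max - qth_min);

        /* Prints something like: avg = 102358 bytes, max_P = 0.0234, Pb = 0.0166 */
        printf("avg = %.0f bytes, max_P = %.4f, Pb = %.4f\n", avg, max_p, pb);
        return 0;
}
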
struct red_sched_data
{
-/* Parameters */
- u32 limit; /* HARD maximal queue length */
- u32 qth_min; /* Min average length threshold: A scaled */
- u32 qth_max; /* Max average length threshold: A scaled */
- u32 Rmask;
- u32 Scell_max;
- unsigned char flags;
- char Wlog; /* log(W) */
- char Plog; /* random number bits */
- char Scell_log;
- u8 Stab[256];
-
-/* Variables */
- unsigned long qave; /* Average queue length: A scaled */
- int qcount; /* Packets since last random number generation */
- u32 qR; /* Cached random number */
-
- psched_time_t qidlestart; /* Start of idle period */
- struct tc_red_xstats st;
+ u32 limit; /* HARD maximal queue length */
+ unsigned char flags;
+ struct red_parms parms;
+ struct red_stats stats;
};
-static int red_ecn_mark(struct sk_buff *skb)
+static inline int red_use_ecn(struct red_sched_data *q)
{
- if (skb->nh.raw + 20 > skb->tail)
- return 0;
-
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- if (INET_ECN_is_not_ect(skb->nh.iph->tos))
- return 0;
- IP_ECN_set_ce(skb->nh.iph);
- return 1;
- case __constant_htons(ETH_P_IPV6):
- if (INET_ECN_is_not_ect(ipv6_get_dsfield(skb->nh.ipv6h)))
- return 0;
- IP6_ECN_set_ce(skb->nh.ipv6h);
- return 1;
- default:
- return 0;
- }
+ return q->flags & TC_RED_ECN;
}
-static int
-red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static inline int red_use_harddrop(struct red_sched_data *q)
+{
+ return q->flags & TC_RED_HARDDROP;
+}
+
+static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct red_sched_data *q = qdisc_priv(sch);
- psched_time_t now;
+ q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);
- if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
- long us_idle;
- int shift;
+ if (red_is_idling(&q->parms))
+ red_end_of_idle_period(&q->parms);
- PSCHED_GET_TIME(now);
- us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
- PSCHED_SET_PASTPERFECT(q->qidlestart);
+ switch (red_action(&q->parms, q->parms.qavg)) {
+ case RED_DONT_MARK:
+ break;
-/*
- The problem: ideally, average length queue recalcultion should
- be done over constant clock intervals. This is too expensive, so that
- the calculation is driven by outgoing packets.
- When the queue is idle we have to model this clock by hand.
-
- SF+VJ proposed to "generate" m = idletime/(average_pkt_size/bandwidth)
- dummy packets as a burst after idle time, i.e.
-
- q->qave *= (1-W)^m
-
- This is an apparently overcomplicated solution (f.e. we have to precompute
- a table to make this calculation in reasonable time)
- I believe that a simpler model may be used here,
- but it is field for experiments.
-*/
- shift = q->Stab[us_idle>>q->Scell_log];
-
- if (shift) {
- q->qave >>= shift;
- } else {
- /* Approximate initial part of exponent
- with linear function:
- (1-W)^m ~= 1-mW + ...
-
- Seems, it is the best solution to
- problem of too coarce exponent tabulation.
- */
-
- us_idle = (q->qave * us_idle)>>q->Scell_log;
- if (us_idle < q->qave/2)
- q->qave -= us_idle;
- else
- q->qave >>= 1;
- }
- } else {
- q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
- /* NOTE:
- q->qave is fixed point number with point at Wlog.
- The formulae above is equvalent to floating point
- version:
-
- qave = qave*(1-W) + sch->qstats.backlog*W;
- --ANK (980924)
- */
- }
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
- if (q->qave < q->qth_min) {
- q->qcount = -1;
-enqueue:
- if (sch->qstats.backlog + skb->len <= q->limit) {
- __skb_queue_tail(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->bstats.bytes += skb->len;
- sch->bstats.packets++;
- return NET_XMIT_SUCCESS;
- } else {
- q->st.pdrop++;
- }
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
- if (q->qave >= q->qth_max) {
- q->qcount = -1;
- sch->qstats.overlimits++;
-mark:
- if (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
- q->st.early++;
- goto drop;
- }
- q->st.marked++;
- goto enqueue;
- }
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (red_use_harddrop(q) || !red_use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
- if (++q->qcount) {
- /* The formula used below causes questions.
-
- OK. qR is random number in the interval 0..Rmask
- i.e. 0..(2^Plog). If we used floating point
- arithmetics, it would be: (2^Plog)*rnd_num,
- where rnd_num is less 1.
-
- Taking into account, that qave have fixed
- point at Wlog, and Plog is related to max_P by
- max_P = (qth_max-qth_min)/2^Plog; two lines
- below have the following floating point equivalent:
-
- max_P*(qave - qth_min)/(qth_max-qth_min) < rnd/qcount
-
- Any questions? --ANK (980924)
- */
- if (((q->qave - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
- goto enqueue;
- q->qcount = 0;
- q->qR = net_random()&q->Rmask;
- sch->qstats.overlimits++;
- goto mark;
+ q->stats.forced_mark++;
+ break;
}
- q->qR = net_random()&q->Rmask;
- goto enqueue;
-drop:
- kfree_skb(skb);
- sch->qstats.drops++;
+ if (sch->qstats.backlog + skb->len <= q->limit)
+ return qdisc_enqueue_tail(skb, sch);
+
+ q->stats.pdrop++;
+ return qdisc_drop(skb, sch);
+
+congestion_drop:
+ qdisc_drop(skb, sch);
return NET_XMIT_CN;
}
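
The switch above maps the generic red_action() verdict onto mark-or-drop: ECN marking is attempted only when TC_RED_ECN is set, and TC_RED_HARDDROP forces a drop even for ECN-capable packets once the average crosses qth_max. A condensed userspace sketch of that decision table follows; the helper and the flag macros are illustrative, only the verdict names mirror net/red.h.

#include <stdio.h>

enum red_verdict { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };
enum red_outcome { ENQUEUE, MARK_AND_ENQUEUE, DROP };

#define FLAG_ECN        0x1
#define FLAG_HARDDROP   0x2

/* ecn_capable: would INET_ECN_set_ce() succeed for this packet? */
static enum red_outcome red_decide(enum red_verdict v, unsigned flags,
                                   int ecn_capable)
{
        switch (v) {
        case RED_DONT_MARK:
                return ENQUEUE;
        case RED_PROB_MARK:
                if ((flags & FLAG_ECN) && ecn_capable)
                        return MARK_AND_ENQUEUE;
                return DROP;                    /* counted as prob_drop */
        case RED_HARD_MARK:
                if (!(flags & FLAG_HARDDROP) &&
                    (flags & FLAG_ECN) && ecn_capable)
                        return MARK_AND_ENQUEUE;
                return DROP;                    /* counted as forced_drop */
        }
        return DROP;
}

int main(void)
{
        static const char *names[] = { "enqueue", "mark+enqueue", "drop" };

        printf("prob, ecn, capable : %s\n",
               names[red_decide(RED_PROB_MARK, FLAG_ECN, 1)]);
        printf("hard, ecn, capable : %s\n",
               names[red_decide(RED_HARD_MARK, FLAG_ECN, 1)]);
        printf("hard, ecn+harddrop : %s\n",
               names[red_decide(RED_HARD_MARK, FLAG_ECN | FLAG_HARDDROP, 1)]);
        return 0;
}

Note that an accepted packet is still subject to the byte limit check right after the switch; only the mark/drop decision is shown here.
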
-static int
-red_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
struct red_sched_data *q = qdisc_priv(sch);
- PSCHED_SET_PASTPERFECT(q->qidlestart);
+ if (red_is_idling(&q->parms))
+ red_end_of_idle_period(&q->parms);
- __skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += skb->len;
- sch->qstats.requeues++;
- return 0;
+ return qdisc_requeue(skb, sch);
}
-static struct sk_buff *
-red_dequeue(struct Qdisc* sch)
+static struct sk_buff * red_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
- skb = __skb_dequeue(&sch->q);
- if (skb) {
- sch->qstats.backlog -= skb->len;
- return skb;
- }
- PSCHED_GET_TIME(q->qidlestart);
- return NULL;
+ skb = qdisc_dequeue_head(sch);
+
+ if (skb == NULL && !red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+
+ return skb;
}
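
Dequeueing from an empty queue now only stamps the start of an idle period; aging the average by (1-W)^m for the time spent idle happens lazily on the next enqueue, using the user-supplied Stab table of precomputed shifts. A rough userspace model of that bookkeeping follows; the field names and Stab contents are invented for the demo (real tables are supplied by tc via TCA_RED_STAB).

#include <stdio.h>

/*
 * Rough model of RED's idle-period handling: record when the queue went
 * empty, then on the next enqueue age the average by (1-W)^m, where the
 * log2 of that factor is looked up in a table indexed by idle time
 * shifted down by Scell_log. Table values below are invented.
 */
struct red_state {
        unsigned long qavg;             /* average, Wlog fixed point */
        long idle_since;                /* -1 when not idling */
};

static void start_idle(struct red_state *s, long now_us)
{
        s->idle_since = now_us;
}

static void end_idle(struct red_state *s, long now_us,
                     int Scell_log, const unsigned char *stab, size_t n)
{
        if (s->idle_since >= 0) {
                unsigned long idx = (now_us - s->idle_since) >> Scell_log;
                unsigned char shift = stab[idx < n ? idx : n - 1];

                if (shift)
                        s->qavg >>= shift;      /* qavg *= (1-W)^m, roughly */
                s->idle_since = -1;
        }
}

int main(void)
{
        static const unsigned char stab[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        struct red_state s = { .qavg = 100 * 1024UL << 9, .idle_since = -1 };

        start_idle(&s, 1000);                   /* queue went empty at t=1000us */
        end_idle(&s, 1000 + 4096, 10, stab, 8); /* next packet ~4ms later */

        printf("aged qavg = %lu bytes\n", s.qavg >> 9);     /* 6400 */
        return 0;
}
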
static unsigned int red_drop(struct Qdisc* sch)
@@ -333,16 +130,17 @@ static unsigned int red_drop(struct Qdisc* sch)
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
- skb = __skb_dequeue_tail(&sch->q);
+ skb = qdisc_dequeue_tail(sch);
if (skb) {
unsigned int len = skb->len;
- sch->qstats.backlog -= len;
- sch->qstats.drops++;
- q->st.other++;
- kfree_skb(skb);
+ q->stats.other++;
+ qdisc_drop(skb, sch);
return len;
}
- PSCHED_GET_TIME(q->qidlestart);
+
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+
return 0;
}
@@ -350,43 +148,38 @@ static void red_reset(struct Qdisc* sch)
{
struct red_sched_data *q = qdisc_priv(sch);
- __skb_queue_purge(&sch->q);
- sch->qstats.backlog = 0;
- PSCHED_SET_PASTPERFECT(q->qidlestart);
- q->qave = 0;
- q->qcount = -1;
+ qdisc_reset_queue(sch);
+ red_restart(&q->parms);
}
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
- struct rtattr *tb[TCA_RED_STAB];
+ struct rtattr *tb[TCA_RED_MAX];
struct tc_red_qopt *ctl;
- if (opt == NULL ||
- rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
- tb[TCA_RED_PARMS-1] == 0 || tb[TCA_RED_STAB-1] == 0 ||
+ if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
+ return -EINVAL;
+
+ if (tb[TCA_RED_PARMS-1] == NULL ||
RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
- RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
+ tb[TCA_RED_STAB-1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
return -EINVAL;
ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
sch_tree_lock(sch);
q->flags = ctl->flags;
- q->Wlog = ctl->Wlog;
- q->Plog = ctl->Plog;
- q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
- q->Scell_log = ctl->Scell_log;
- q->Scell_max = (255<<q->Scell_log);
- q->qth_min = ctl->qth_min<<ctl->Wlog;
- q->qth_max = ctl->qth_max<<ctl->Wlog;
q->limit = ctl->limit;
- memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
- q->qcount = -1;
+ red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Plog, ctl->Scell_log,
+ RTA_DATA(tb[TCA_RED_STAB-1]));
+
if (skb_queue_empty(&sch->q))
- PSCHED_SET_PASTPERFECT(q->qidlestart);
+ red_end_of_idle_period(&q->parms);
+
sch_tree_unlock(sch);
return 0;
}
@@ -399,39 +192,39 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct red_sched_data *q = qdisc_priv(sch);
- unsigned char *b = skb->tail;
- struct rtattr *rta;
- struct tc_red_qopt opt;
-
- rta = (struct rtattr*)b;
- RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
- opt.limit = q->limit;
- opt.qth_min = q->qth_min>>q->Wlog;
- opt.qth_max = q->qth_max>>q->Wlog;
- opt.Wlog = q->Wlog;
- opt.Plog = q->Plog;
- opt.Scell_log = q->Scell_log;
- opt.flags = q->flags;
+ struct rtattr *opts = NULL;
+ struct tc_red_qopt opt = {
+ .limit = q->limit,
+ .flags = q->flags,
+ .qth_min = q->parms.qth_min >> q->parms.Wlog,
+ .qth_max = q->parms.qth_max >> q->parms.Wlog,
+ .Wlog = q->parms.Wlog,
+ .Plog = q->parms.Plog,
+ .Scell_log = q->parms.Scell_log,
+ };
+
+ opts = RTA_NEST(skb, TCA_OPTIONS);
RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
- rta->rta_len = skb->tail - b;
-
- return skb->len;
+ return RTA_NEST_END(skb, opts);
rtattr_failure:
- skb_trim(skb, b - skb->data);
- return -1;
+ return RTA_NEST_CANCEL(skb, opts);
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct red_sched_data *q = qdisc_priv(sch);
-
- return gnet_stats_copy_app(d, &q->st, sizeof(q->st));
+ struct tc_red_xstats st = {
+ .early = q->stats.prob_drop + q->stats.forced_drop,
+ .pdrop = q->stats.pdrop,
+ .other = q->stats.other,
+ .marked = q->stats.prob_mark + q->stats.forced_mark,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
}
static struct Qdisc_ops red_qdisc_ops = {
- .next = NULL,
- .cl_ops = NULL,
.id = "red",
.priv_size = sizeof(struct red_sched_data),
.enqueue = red_enqueue,
@@ -450,10 +243,13 @@ static int __init red_module_init(void)
{
return register_qdisc(&red_qdisc_ops);
}
-static void __exit red_module_exit(void)
+
+static void __exit red_module_exit(void)
{
unregister_qdisc(&red_qdisc_ops);
}
+
module_init(red_module_init)
module_exit(red_module_exit)
+
MODULE_LICENSE("GPL");