author	Stephen Hemminger <shemminger@osdl.org>	2005-12-21 19:03:44 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-01-03 13:11:05 -0800
commit	c865e5d99e25a171e8262fc0f7ba608568633c64 (patch)
tree	5d8cd6a5a4623d3497f2eb0c14e80511f5b2ef73 /net/sched
parent	8cbb512e50fb702b5b1d444f76ebcdb53577b2ec (diff)
[PKT_SCHED] netem: packet corruption option
Here is a new feature for netem in 2.6.16: the ability to randomly corrupt packets. A version was done by Hagen Paul Pfeifer, but I redid it to handle backwards compatibility with the netlink interface and the presence of hardware checksum offload. It is useful for testing hardware offload in devices.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_netem.c	49
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 82fb07aa06a..ba528320483 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -25,7 +25,7 @@
#include <net/pkt_sched.h>
-#define VERSION "1.1"
+#define VERSION "1.2"
/* Network Emulation Queuing algorithm.
====================================
@@ -65,11 +65,12 @@ struct netem_sched_data {
u32 jitter;
u32 duplicate;
u32 reorder;
+ u32 corrupt;
struct crndstate {
unsigned long last;
unsigned long rho;
- } delay_cor, loss_cor, dup_cor, reorder_cor;
+ } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
struct disttable {
u32 size;
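The new corrupt/corrupt_cor pair plugs into netem's existing correlated random source: each struct crndstate keeps the previous draw (last) and a correlation coefficient (rho), both as 32-bit fixed-point values, so a high rho makes corruption events cluster into bursts rather than occur independently. A minimal user-space sketch of that scheme, with a hypothetical rand32() standing in for the kernel's net_random():

#include <stdint.h>
#include <stdlib.h>

struct crndstate {
	uint32_t last;	/* previous value returned */
	uint32_t rho;	/* correlation, fixed point over 2^32 */
};

/* Hypothetical stand-in for the kernel's net_random(). */
static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Correlated draw: blend fresh randomness with the previous
 * value, weighted by rho, in 32-bit fixed point. */
static uint32_t get_crandom(struct crndstate *state)
{
	uint64_t value, rho;
	uint32_t answer;

	if (state->rho == 0)	/* no correlation requested */
		return rand32();

	value = rand32();
	rho = (uint64_t)state->rho + 1;
	answer = (value * ((1ULL << 32) - rho) +
		  (uint64_t)state->last * rho) >> 32;
	state->last = answer;
	return answer;
}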
@@ -183,6 +184,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->duplicate = dupsave;
}
+ /*
+ * Randomized packet corruption.
+ * Make copy if needed since we are modifying
+ * If packet is going to be hardware checksummed, then
+ * do it now in software before we mangle it.
+ */
+ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+ if (!(skb = skb_unshare(skb, GFP_ATOMIC))
+ || (skb->ip_summed == CHECKSUM_HW
+ && skb_checksum_help(skb, 0))) {
+ sch->qstats.drops++;
+ return NET_XMIT_DROP;
+ }
+
+ skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+ }
+
if (q->gap == 0 /* not doing reordering */
|| q->counter < q->gap /* inside last reordering gap */
|| q->reorder < get_crandom(&q->reorder_cor)) {
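The corruption itself is a single random bit flip somewhere in the linear packet data, which is why the skb must first be unshared (we are about to modify it) and any pending hardware checksum (CHECKSUM_HW) completed in software before the bytes are mangled; otherwise the NIC would checksum the already-corrupted payload and the receiver could never detect the damage. With the matching iproute2 support this path is exercised by something like "tc qdisc change dev eth0 root netem corrupt 0.1%" (shown for illustration). A user-space sketch of the same mangling on a plain buffer:

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

/* Flip one randomly chosen bit, as the patch does on skb->data
 * over the skb_headlen(skb) linear bytes. */
static void corrupt_one_bit(uint8_t *buf, size_t len)
{
	if (len == 0)
		return;		/* nothing to mangle */
	buf[rand() % len] ^= 1u << (rand() % 8);
}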
@@ -382,6 +400,20 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
return 0;
}
+static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ const struct tc_netem_corrupt *r = RTA_DATA(attr);
+
+ if (RTA_PAYLOAD(attr) != sizeof(*r))
+ return -EINVAL;
+
+ q->corrupt = r->probability;
+ init_crandom(&q->corrupt_cor, r->correlation);
+ return 0;
+}
+
+/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -432,13 +464,19 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
if (ret)
return ret;
}
+
if (tb[TCA_NETEM_REORDER-1]) {
ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
if (ret)
return ret;
}
- }
+ if (tb[TCA_NETEM_CORRUPT-1]) {
+ ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
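On the configuration side, get_corrupt() expects a struct tc_netem_corrupt payload carrying a probability and a correlation, each expressed as a fraction of 0xFFFFFFFF, and rejects an attribute of any other size. A sketch of how a user-space configurator might fill that payload before appending it as TCA_NETEM_CORRUPT (assuming the companion header change that defines struct tc_netem_corrupt; the percentage conversion mirrors what tc does):

#include <stdint.h>
#include <linux/pkt_sched.h>	/* struct tc_netem_corrupt */

/* Convert a percentage into netem's fixed-point probability,
 * a fraction of UINT32_MAX (so 100% == 0xFFFFFFFF). */
static uint32_t percent_to_prob(double percent)
{
	return (uint32_t)(percent / 100.0 * 0xFFFFFFFFu);
}

/* Fill the exact payload get_corrupt() checks with
 * RTA_PAYLOAD(attr) == sizeof(struct tc_netem_corrupt). */
static void fill_corrupt(struct tc_netem_corrupt *c,
			 double percent, double correlation)
{
	c->probability = percent_to_prob(percent);
	c->correlation = percent_to_prob(correlation);
}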
@@ -564,6 +602,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_netem_qopt qopt;
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
+ struct tc_netem_corrupt corrupt;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
@@ -582,6 +621,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
reorder.correlation = q->reorder_cor.rho;
RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+ corrupt.probability = q->corrupt;
+ corrupt.correlation = q->corrupt_cor.rho;
+ RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+
rta->rta_len = skb->tail - b;
return skb->len;
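Dumping TCA_NETEM_CORRUPT unconditionally stays compatible with older tools because netlink consumers walk the attribute stream with RTA_OK()/RTA_NEXT() and skip types they do not recognize; that is the backwards compatibility the commit message refers to. A user-space sketch of such a consumer loop (illustrative only):

#include <linux/rtnetlink.h>	/* struct rtattr, RTA_OK, RTA_NEXT */
#include <linux/pkt_sched.h>	/* TCA_NETEM_* attribute types */

/* Walk a netem option TLV stream, ignoring unknown attributes
 * so that new ones such as TCA_NETEM_CORRUPT are harmless. */
static void walk_netem_options(struct rtattr *rta, int len)
{
	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		switch (rta->rta_type) {
		case TCA_NETEM_CORR:
			/* handle delay/loss/duplication correlation */
			break;
		default:
			/* unknown type: skip, old tools keep working */
			break;
		}
	}
}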