author	Arnd Bergmann <arnd@arndb.de>	2012-09-04 15:01:37 +0200
committer	Arnd Bergmann <arnd@arndb.de>	2012-09-04 15:01:37 +0200
commit	863e99a8c1ea2b0391491904297f57a0f6a1fdd6 (patch)
tree	0e7789f83c0ba3a1bc3c19d3ccf5ea6f84f19db6 /net/sched/sch_netem.c
parent	dd9bf78040fa0da4cecc228e1682b9682b8cb180 (diff)
parent	a849088aa1552b1a28eea3daff599ee22a734ae3 (diff)
Merge commit 'a849088aa1' from rmk/fixes into cleanup/io-pci
As Stephen Rothwell reports, a849088aa155 ("ARM: Fix ioremap() of address zero") from the arm-current tree and commit c2794437091a ("ARM: Add fixed PCI i/o mapping") from the arm-soc tree conflict in a nontrivial way in arch/arm/mm/mmu.c.

Rob Herring explains:

    The PCI i/o reserved area has a dummy physical address of 0 and needs
    to be skipped by ioremap searches. So we don't set VM_ARM_STATIC_MAPPING
    to prevent matches by ioremap. The vm_struct settings don't really
    matter when we do the real mapping of the i/o space.

Since commit a849088aa155 is at the start of the fixes branch in the arm tree, we can merge it into the branch that contains the other ioremap changes.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
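To make the quoted reasoning concrete, the sketch below shows, in plain self-contained C, why the PCI i/o reservation must not carry VM_ARM_STATIC_MAPPING: a lookup by physical address would otherwise match its dummy phys_addr of 0. This is not the actual arch/arm/mm/ioremap.c code; the struct, the flag value and the lookup_static_mapping() helper are local stand-ins introduced only for illustration.

	#include <stddef.h>
	#include <stdint.h>

	/* Stand-in for the ARM-specific flag; the real value is defined in
	 * arch/arm/mm/mm.h and differs. */
	#define VM_ARM_STATIC_MAPPING	0x40000000UL

	/* Minimal stand-in for the kernel's vm_struct static-mapping list. */
	struct static_map {
		struct static_map *next;
		void *virt;
		uint64_t phys;		/* dummy 0 for the PCI i/o reservation */
		size_t size;
		unsigned long flags;
	};

	static void *lookup_static_mapping(struct static_map *list,
					   uint64_t paddr, size_t size)
	{
		struct static_map *m;

		for (m = list; m; m = m->next) {
			/* Entries that do not set VM_ARM_STATIC_MAPPING, such as
			 * the PCI i/o reservation with its dummy phys == 0, are
			 * skipped so they never match an ioremap()-style search. */
			if (!(m->flags & VM_ARM_STATIC_MAPPING))
				continue;
			if (paddr >= m->phys && paddr + size <= m->phys + m->size)
				return (char *)m->virt + (paddr - m->phys);
		}
		return NULL;	/* caller falls back to creating a new mapping */
	}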
Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--	net/sched/sch_netem.c	| 51
1 file changed, 23 insertions(+), 28 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c..298c0ddfb57 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
return PSCHED_NS2TICKS(ticks);
}
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct sk_buff_head *list = &sch->q;
psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
- struct sk_buff *skb;
-
- if (likely(skb_queue_len(list) < sch->limit)) {
- skb = skb_peek_tail(list);
- /* Optimize for add at tail */
- if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
- return qdisc_enqueue_tail(nskb, sch);
+ struct sk_buff *skb = skb_peek_tail(list);
- skb_queue_reverse_walk(list, skb) {
- if (tnext >= netem_skb_cb(skb)->time_to_send)
- break;
- }
+ /* Optimize for add at tail */
+ if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+ return __skb_queue_tail(list, nskb);
- __skb_queue_after(list, skb, nskb);
- sch->qstats.backlog += qdisc_pkt_len(nskb);
- return NET_XMIT_SUCCESS;
+ skb_queue_reverse_walk(list, skb) {
+ if (tnext >= netem_skb_cb(skb)->time_to_send)
+ break;
}
- return qdisc_reshape_fail(nskb, sch);
+ __skb_queue_after(list, skb, nskb);
}
/*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
- int ret;
int count = 1;
/* Random duplication */
@@ -388,7 +380,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
- skb_orphan(skb);
+ /* If a delay is expected, orphan the skb. (orphaning usually takes
+ * place at TX completion time, so _before_ the link transit delay)
+ * Ideally, this orphaning should be done after the rate limiting
+ * module, because this breaks TCP Small Queue, and other mechanisms
+ * based on socket sk_wmem_alloc.
+ */
+ if (q->latency || q->jitter)
+ skb_orphan(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
@@ -419,6 +418,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
+ if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ return qdisc_reshape_fail(skb, sch);
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap - 1 || /* inside last reordering gap */
@@ -450,7 +454,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cb->time_to_send = now + delay;
++q->counter;
- ret = tfifo_enqueue(skb, sch);
+ tfifo_enqueue(skb, sch);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
@@ -460,16 +464,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->counter = 0;
__skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
sch->qstats.requeues++;
- ret = NET_XMIT_SUCCESS;
- }
-
- if (ret != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
- return ret;
- }
}
return NET_XMIT_SUCCESS;
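For readability, here is the tfifo_enqueue() path as it reads after this change, assembled from the first hunk above (the comment on the reverse walk is added here for clarity):

	static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
	{
		struct sk_buff_head *list = &sch->q;
		psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
		struct sk_buff *skb = skb_peek_tail(list);

		/* Optimize for add at tail */
		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
			return __skb_queue_tail(list, nskb);

		/* Otherwise walk backwards so the queue stays sorted by time_to_send */
		skb_queue_reverse_walk(list, skb) {
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);
	}

The queue-limit check and the backlog accounting now live in netem_enqueue(), which calls qdisc_reshape_fail() on overflow before setting up the cb; that is why tfifo_enqueue() can return void and the old NET_XMIT_* bookkeeping at the tail of netem_enqueue() goes away.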