Diffstat (limited to 'net/sched/sch_netem.c')
-rw-r--r--  net/sched/sch_netem.c  51
1 file changed, 23 insertions, 28 deletions
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c..298c0ddfb57 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
         return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
         struct sk_buff_head *list = &sch->q;
         psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-        struct sk_buff *skb;
-
-        if (likely(skb_queue_len(list) < sch->limit)) {
-                skb = skb_peek_tail(list);
-                /* Optimize for add at tail */
-                if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-                        return qdisc_enqueue_tail(nskb, sch);
+        struct sk_buff *skb = skb_peek_tail(list);
 
-                skb_queue_reverse_walk(list, skb) {
-                        if (tnext >= netem_skb_cb(skb)->time_to_send)
-                                break;
-                }
+        /* Optimize for add at tail */
+        if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+                return __skb_queue_tail(list, nskb);
 
-                __skb_queue_after(list, skb, nskb);
-                sch->qstats.backlog += qdisc_pkt_len(nskb);
-                return NET_XMIT_SUCCESS;
+        skb_queue_reverse_walk(list, skb) {
+                if (tnext >= netem_skb_cb(skb)->time_to_send)
+                        break;
         }
 
-        return qdisc_reshape_fail(nskb, sch);
+        __skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         /* We don't fill cb now as skb_unshare() may invalidate it */
         struct netem_skb_cb *cb;
         struct sk_buff *skb2;
-        int ret;
         int count = 1;
 
         /* Random duplication */
@@ -388,7 +380,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
         }
 
-        skb_orphan(skb);
+        /* If a delay is expected, orphan the skb. (orphaning usually takes
+         * place at TX completion time, so _before_ the link transit delay)
+         * Ideally, this orphaning should be done after the rate limiting
+         * module, because this breaks TCP Small Queue, and other mechanisms
+         * based on socket sk_wmem_alloc.
+         */
+        if (q->latency || q->jitter)
+                skb_orphan(skb);
 
         /*
          * If we need to duplicate packet, then re-insert at top of the
@@ -419,6 +418,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
         }
 
+        if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+                return qdisc_reshape_fail(skb, sch);
+
+        sch->qstats.backlog += qdisc_pkt_len(skb);
+
         cb = netem_skb_cb(skb);
         if (q->gap == 0 ||              /* not doing reordering */
             q->counter < q->gap - 1 ||  /* inside last reordering gap */
@@ -450,7 +454,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
                 cb->time_to_send = now + delay;
                 ++q->counter;
-                ret = tfifo_enqueue(skb, sch);
+                tfifo_enqueue(skb, sch);
         } else {
                 /*
                  * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +464,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 q->counter = 0;
 
                 __skb_queue_head(&sch->q, skb);
-                sch->qstats.backlog += qdisc_pkt_len(skb);
                 sch->qstats.requeues++;
-                ret = NET_XMIT_SUCCESS;
-        }
-
-        if (ret != NET_XMIT_SUCCESS) {
-                if (net_xmit_drop_count(ret)) {
-                        sch->qstats.drops++;
-                        return ret;
-                }
         }
 
         return NET_XMIT_SUCCESS;
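
The interesting part of the change is the new tfifo_enqueue(): it no longer polices sch->limit or touches the backlog stats (both moved up into netem_enqueue()), and only keeps sch->q sorted by each packet's time_to_send, trying the tail first and falling back to a reverse walk. Below is a small userspace sketch of that ordered-insert technique, for illustration only: struct pkt, struct pkt_list, insert_after() and tfifo_insert() are hypothetical stand-ins for sk_buff, sk_buff_head, __skb_queue_after() and the function in the diff, not kernel APIs.

/*
 * Userspace sketch of netem's time-ordered insert: a circular doubly
 * linked list with a sentinel node, sorted by time_to_send, with a fast
 * path for the common append-at-tail case and a reverse walk (like
 * skb_queue_reverse_walk()) otherwise.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pkt {
        uint64_t time_to_send;          /* models netem_skb_cb(skb)->time_to_send */
        struct pkt *next, *prev;
};

struct pkt_list {                       /* models sk_buff_head: sentinel node */
        struct pkt head;
};

static void pkt_list_init(struct pkt_list *l)
{
        l->head.next = l->head.prev = &l->head;
}

static void insert_after(struct pkt *pos, struct pkt *n)
{
        n->prev = pos;
        n->next = pos->next;
        pos->next->prev = n;
        pos->next = n;
}

static void tfifo_insert(struct pkt_list *l, struct pkt *n)
{
        struct pkt *pos = l->head.prev; /* tail, or the sentinel if empty */

        /* Optimize for add at tail: timestamps usually arrive in order */
        if (pos == &l->head || n->time_to_send >= pos->time_to_send) {
                insert_after(pos, n);
                return;
        }

        /* Walk backwards to the first packet not scheduled later than us;
         * reaching the sentinel inserts at the head of the queue.
         */
        while (pos != &l->head && n->time_to_send < pos->time_to_send)
                pos = pos->prev;
        insert_after(pos, n);
}

int main(void)
{
        uint64_t stamps[] = { 10, 20, 15, 30 }; /* 15 exercises the reverse walk */
        struct pkt_list q;
        struct pkt *p;

        pkt_list_init(&q);
        for (size_t i = 0; i < sizeof(stamps) / sizeof(stamps[0]); i++) {
                p = calloc(1, sizeof(*p));
                p->time_to_send = stamps[i];
                tfifo_insert(&q, p);
        }
        for (p = q.head.next; p != &q.head; p = p->next)
                printf("%llu\n", (unsigned long long)p->time_to_send); /* 10 15 20 30 */
        return 0;
}

The tail fast path keeps in-order arrivals O(1); only packets that jitter pushes out of order pay for the linear reverse scan, so the scan cost grows with queue depth only under heavy reordering.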