Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c     |  7
-rw-r--r--  net/sched/act_police.c  |  2
-rw-r--r--  net/sched/cls_u32.c     | 10
-rw-r--r--  net/sched/sch_api.c     | 65
-rw-r--r--  net/sched/sch_atm.c     | 18
-rw-r--r--  net/sched/sch_cbq.c     | 31
-rw-r--r--  net/sched/sch_dsmark.c  | 10
-rw-r--r--  net/sched/sch_generic.c | 28
-rw-r--r--  net/sched/sch_hfsc.c    | 12
-rw-r--r--  net/sched/sch_htb.c     | 40
-rw-r--r--  net/sched/sch_netem.c   |  5
-rw-r--r--  net/sched/sch_prio.c    | 14
-rw-r--r--  net/sched/sch_red.c     |  2
-rw-r--r--  net/sched/sch_sfq.c     | 17
-rw-r--r--  net/sched/sch_tbf.c     |  3
-rw-r--r--  net/sched/sch_teql.c    |  9
16 files changed, 133 insertions, 140 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 74e662cbb2c..26c7e1f9a35 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -41,7 +41,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
return;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);
@@ -205,10 +205,9 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
{
struct tcf_common *p = NULL;
if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
- if (bind) {
+ if (bind)
p->tcfc_bindcnt++;
- p->tcfc_refcnt++;
- }
+ p->tcfc_refcnt++;
a->priv = p;
}
return p;
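
Note on the BUG_TRAP() conversions in this and the following files: the
two macros assert with opposite polarity, which is why every converted
call site also inverts its condition. Roughly (a sketch of the old
macro, not its exact kernel definition):

    /* BUG_TRAP(cond) complained when cond was FALSE ... */
    #define BUG_TRAP(x) do {                                            \
            if (unlikely(!(x)))                                         \
                    printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
                           #x, __FILE__, __LINE__);                     \
    } while (0)

    /* ... while WARN_ON(cond) warns when cond is TRUE.  Hence
     * BUG_TRAP(0) becomes WARN_ON(1), and
     * BUG_TRAP(!ht->refcnt) becomes WARN_ON(ht->refcnt). */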
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 32c3f9d9fb7..38015b49394 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -116,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p)
return;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
}
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 527db2559dd..246f9065ce3 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -345,7 +345,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
}
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return 0;
}
@@ -368,7 +368,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode **hn;
- BUG_TRAP(!ht->refcnt);
+ WARN_ON(ht->refcnt);
u32_clear_hnode(tp, ht);
@@ -380,7 +380,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return -ENOENT;
}
@@ -389,7 +389,7 @@ static void u32_destroy(struct tcf_proto *tp)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
- BUG_TRAP(root_ht != NULL);
+ WARN_ON(root_ht == NULL);
if (root_ht && --root_ht->refcnt == 0)
u32_destroy_hnode(tp, root_ht);
@@ -407,7 +407,7 @@ static void u32_destroy(struct tcf_proto *tp)
while ((ht = tp_c->hlist) != NULL) {
tp_c->hlist = ht->next;
- BUG_TRAP(ht->refcnt == 0);
+ WARN_ON(ht->refcnt != 0);
kfree(ht);
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b0601642e22..ba1d121f312 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -189,7 +189,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- struct Qdisc *q, *txq_root = txq->qdisc;
+ struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
if (!(txq_root->flags & TCQ_F_BUILTIN) &&
txq_root->handle == handle)
@@ -572,44 +572,21 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc)
{
+ struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
spinlock_t *root_lock;
- struct Qdisc *oqdisc;
- int ingress;
-
- ingress = 0;
- if (qdisc && qdisc->flags&TCQ_F_INGRESS)
- ingress = 1;
-
- if (ingress) {
- oqdisc = dev_queue->qdisc;
- } else {
- oqdisc = dev_queue->qdisc_sleeping;
- }
root_lock = qdisc_root_lock(oqdisc);
spin_lock_bh(root_lock);
- if (ingress) {
- /* Prune old scheduler */
- if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
- /* delete */
- qdisc_reset(oqdisc);
- dev_queue->qdisc = NULL;
- } else { /* new */
- dev_queue->qdisc = qdisc;
- }
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+ qdisc_reset(oqdisc);
- } else {
- /* Prune old scheduler */
- if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
- qdisc_reset(oqdisc);
-
- /* ... and graft new one */
- if (qdisc == NULL)
- qdisc = &noop_qdisc;
- dev_queue->qdisc_sleeping = qdisc;
- dev_queue->qdisc = &noop_qdisc;
- }
+ /* ... and graft new one */
+ if (qdisc == NULL)
+ qdisc = &noop_qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
+ dev_queue->qdisc = &noop_qdisc;
spin_unlock_bh(root_lock);
@@ -678,7 +655,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
ingress = 0;
num_q = dev->num_tx_queues;
- if (q && q->flags & TCQ_F_INGRESS) {
+ if ((q && q->flags & TCQ_F_INGRESS) ||
+ (new && new->flags & TCQ_F_INGRESS)) {
num_q = 1;
ingress = 1;
}
@@ -692,13 +670,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (!ingress)
dev_queue = netdev_get_tx_queue(dev, i);
- if (ingress) {
- old = dev_graft_qdisc(dev_queue, q);
- } else {
- old = dev_graft_qdisc(dev_queue, new);
- if (new && i > 0)
- atomic_inc(&new->refcnt);
- }
+ old = dev_graft_qdisc(dev_queue, new);
+ if (new && i > 0)
+ atomic_inc(&new->refcnt);
+
notify_and_destroy(skb, n, classid, old, new);
}
@@ -817,8 +792,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
goto err_out3;
}
}
- if (parent)
- list_add_tail(&sch->list, &dev_queue->qdisc->list);
+ if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
+ list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
return sch;
}
@@ -1261,11 +1236,11 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
q_idx = 0;
dev_queue = netdev_get_tx_queue(dev, 0);
- if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+ if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
dev_queue = &dev->rx_queue;
- if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+ if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
cont:
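
Note: the txq->qdisc -> txq->qdisc_sleeping switches above rely on the
split between the two pointers: dev_queue->qdisc is what the transmit
path actually runs and is parked on &noop_qdisc whenever the device is
inactive, while dev_queue->qdisc_sleeping always holds the configured
qdisc. Lookup and dump must therefore read qdisc_sleeping, or they
would see noop_qdisc on a downed device. A minimal sketch of the
invariant dev_graft_qdisc() maintains (simplified, locking omitted):

    /* "sleeping" = what the admin configured, visible to dump/lookup;
     * "qdisc"    = what dev_queue_xmit() runs right now. */
    dev_queue->qdisc_sleeping = qdisc;
    dev_queue->qdisc          = &noop_qdisc;  /* until dev_activate() */

    /* dev_activate() later swaps the configured qdisc in, per queue
     * (again simplified): */
    rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);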
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 04faa835be1..43d37256c15 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -162,7 +162,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
qdisc_destroy(flow->q);
tcf_destroy_chain(&flow->filter_list);
if (flow->sock) {
- pr_debug("atm_tc_put: f_count %d\n",
+ pr_debug("atm_tc_put: f_count %ld\n",
file_count(flow->sock->file));
flow->vcc->pop = flow->old_pop;
sockfd_put(flow->sock);
@@ -259,7 +259,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
sock = sockfd_lookup(fd, &error);
if (!sock)
return error; /* f_count++ */
- pr_debug("atm_tc_change: f_count %d\n", file_count(sock->file));
+ pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
error = -EPROTOTYPE;
goto err_out;
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
kfree_skb(skb);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
kfree_skb(skb);
goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, flow->q);
if (ret != 0) {
drop: __maybe_unused
- sch->qstats.drops++;
- if (flow)
- flow->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ if (flow)
+ flow->qstats.drops++;
+ }
return ret;
}
sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
return 0;
}
tasklet_schedule(&p->task);
- return NET_XMIT_BYPASS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
/*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
if (!ret) {
sch->q.qlen++;
sch->qstats.requeues++;
- } else {
+ } else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
p->link.qstats.drops++;
}
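
Note on the NET_XMIT changes in this and the following qdiscs: instead
of returning the bare NET_XMIT_BYPASS code, enqueue paths now OR local
flag bits into a NET_XMIT_SUCCESS verdict, and drop accounting is
guarded by net_xmit_drop_count(). A packet "stolen" by a classifier
action was consumed on purpose, so parent qdiscs must not count it as
a drop; the flag bits are masked off before the value reaches upper
layers. Roughly, the helpers this diff depends on look like this
(bit values illustrative; see include/net/sch_generic.h):

    /* Local-only verdict bits, stripped before leaving the qdisc layer */
    #define __NET_XMIT_STOLEN 0x00010000
    #define __NET_XMIT_BYPASS 0x00020000

    #ifdef CONFIG_NET_CLS_ACT
    /* Stolen packets are not drops as far as parent qdiscs go */
    #define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
    #else
    #define net_xmit_drop_count(e)  (1)
    #endif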
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f1d2f8ec8b4..4e261ce62f4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
for (;;) {
int result = 0;
defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->rx_class = cl;
#endif
if (cl == NULL) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
- sch->qstats.drops++;
- cbq_mark_toplevel(q, cl);
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cbq_mark_toplevel(q, cl);
+ cl->qstats.drops++;
+ }
return ret;
}
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
cbq_activate_class(cl);
return 0;
}
- sch->qstats.drops++;
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return ret;
}
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
q->rx_class = NULL;
if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+ int ret;
cbq_mark_toplevel(q, cl);
q->rx_class = cl;
cl->q->__parent = sch;
- if (qdisc_enqueue(skb, cl->q) == 0) {
+ ret = qdisc_enqueue(skb, cl->q);
+ if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
sch->bstats.packets++;
sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
cbq_activate_class(cl);
return 0;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return 0;
}
@@ -1175,7 +1182,7 @@ static void cbq_unlink_class(struct cbq_class *this)
this->tparent->children = NULL;
}
} else {
- BUG_TRAP(this->sibling == this);
+ WARN_ON(this->sibling != this);
}
}
@@ -1699,7 +1706,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- BUG_TRAP(!cl->filters);
+ WARN_ON(cl->filters);
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->q);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e..edd1298f85f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
kfree_skb(skb);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, p->q);
if (err != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err))
+ sch->qstats.drops++;
return err;
}
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
drop:
kfree_skb(skb);
sch->qstats.drops++;
- return NET_XMIT_BYPASS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
err = p->q->ops->requeue(skb, p->q);
if (err != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err))
+ sch->qstats.drops++;
return err;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 43abd4d27ea..7cf83b37459 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
if (unlikely((skb = dequeue_skb(q)) == NULL))
return 0;
- root_lock = qdisc_root_lock(q);
+ root_lock = qdisc_lock(q);
/* And release qdisc */
spin_unlock(root_lock);
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_subqueue_stopped(dev, skb))
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
break;
}
- if (ret && netif_tx_queue_stopped(txq))
+ if (ret && (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq)))
ret = 0;
return ret;
@@ -505,7 +507,7 @@ errout:
}
EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
@@ -541,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
kfree((char *) qdisc - qdisc->padded);
}
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
@@ -596,7 +598,7 @@ static void transition_one_qdisc(struct net_device *dev,
int *need_watchdog_p = _need_watchdog;
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
- if (new_qdisc != &noqueue_qdisc)
+ if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
}
@@ -619,6 +621,7 @@ void dev_activate(struct net_device *dev)
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+ transition_one_qdisc(dev, &dev->rx_queue, NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
@@ -656,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc;
- root_lock = qdisc_root_lock(q);
+ root_lock = qdisc_lock(q);
if (lock)
spin_lock_bh(root_lock);
@@ -677,6 +680,7 @@ void dev_deactivate(struct net_device *dev)
bool running;
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
dev_watchdog_down(dev);
@@ -718,7 +722,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+ dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
@@ -731,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
- spinlock_t *root_lock = qdisc_root_lock(qdisc);
+ spinlock_t *root_lock = qdisc_lock(qdisc);
dev_queue->qdisc = qdisc_default;
dev_queue->qdisc_sleeping = qdisc_default;
@@ -745,6 +749,6 @@ static void shutdown_scheduler_queue(struct net_device *dev,
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
- BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+ shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ WARN_ON(timer_pending(&dev->watchdog_timer));
}
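
Note: qdisc_restart() now refuses to transmit into a frozen queue as
well as a stopped one. "Frozen" is a distinct per-queue state, set for
example while netif_tx_lock() holds every queue of the device, so
transmitting under it would race with the freezer. A hypothetical
helper showing the combined gate these hunks apply (queue_may_xmit is
not a kernel function, just an illustration):

    static inline int queue_may_xmit(const struct netdev_queue *txq)
    {
            /* stopped: driver out of descriptors (__QUEUE_STATE_XOFF);
             * frozen:  queue temporarily locked out (__QUEUE_STATE_FROZEN) */
            return !netif_tx_queue_stopped(txq) &&
                   !netif_tx_queue_frozen(txq);
    }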
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba..c2b8d9cce3d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (cl->level == 0)
return cl;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
- if (err == NET_XMIT_BYPASS)
+ if (err & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
- cl->qstats.drops++;
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err)) {
+ cl->qstats.drops++;
+ sch->qstats.drops++;
+ }
return err;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30c999c61b0..be35422711a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
return cl;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -524,7 +524,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
@@ -542,7 +542,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(cl->prio_activity);
+ WARN_ON(!cl->prio_activity);
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
#endif
- } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
+ } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return NET_XMIT_DROP;
} else {
cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
#endif
- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+ } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return NET_XMIT_DROP;
} else
htb_activate(q, cl);
@@ -757,7 +761,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
- BUG_TRAP(tree->rb_node);
+ WARN_ON(!tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
@@ -777,7 +781,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
- BUG_TRAP(*sp->pptr);
+ WARN_ON(!*sp->pptr);
if (!*sp->pptr)
return NULL;
htb_next_rb_node(sp->pptr);
@@ -792,7 +796,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return NULL;
}
@@ -810,7 +814,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
do {
next:
- BUG_TRAP(cl);
+ WARN_ON(!cl);
if (!cl)
return NULL;
@@ -1185,7 +1189,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
{
struct htb_class *parent = cl->parent;
- BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
+ WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
@@ -1205,7 +1209,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
- BUG_TRAP(cl->un.leaf.q);
+ WARN_ON(!cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
gen_kill_estimator(&cl->bstats, &cl->rate_est);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a5908570067..fb0294d0b55 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (count == 0) {
sch->qstats.drops++;
kfree_skb(skb);
- return NET_XMIT_BYPASS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++;
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- } else
+ } else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
+ }
pr_debug("netem: enqueue ret %d\n", ret);
return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb09..eac197610ed 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
struct tcf_result res;
int err;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return ret;
}
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
sch->qstats.requeues++;
return 0;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return NET_XMIT_DROP;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bb..5da05839e22 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
sch->q.qlen++;
- } else {
+ } else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
sch->qstats.drops++;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 73f53844ce9..6e041d10dbd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
if (!q->filter_list)
return sfq_hash(q, skb) + 1;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tc_classify(skb, q->filter_list, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return 0;
}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
hash = sfq_classify(skb, sch, &ret);
if (hash == 0) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
hash = sfq_classify(skb, sch, &ret);
if (hash == 0) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -536,14 +536,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.limit = q->limit;
opt.divisor = SFQ_HASH_DIVISOR;
- opt.flows = 0;
- if (q->tail != SFQ_DEPTH) {
- unsigned int i;
-
- for (i = 0; i < SFQ_HASH_DIVISOR; i++)
- if (q->ht[i] != SFQ_DEPTH)
- opt.flows++;
- }
+ opt.flows = q->limit;
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f763..7d3b7ff3bf0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != 0) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return ret;
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6..2c35c678563 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
switch (teql_resolve(skb, skb_res, slave)) {
case 0:
- if (netif_tx_trylock(slave)) {
- if (!__netif_subqueue_stopped(slave, subq) &&
+ if (__netif_tx_trylock(slave_txq)) {
+ if (!netif_tx_queue_stopped(slave_txq) &&
+ !netif_tx_queue_frozen(slave_txq) &&
slave->hard_start_xmit(skb, slave) == 0) {
- netif_tx_unlock(slave);
+ __netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
qdisc_pkt_len(skb);
return 0;
}
- netif_tx_unlock(slave);
+ __netif_tx_unlock(slave_txq);
}
if (netif_queue_stopped(dev))
busy = 1;
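
Note: teql now takes the per-queue lock of the resolved slave queue
instead of the device-global netif_tx_trylock(), so it also has to
check that specific queue's stopped/frozen state. A simplified sketch
of the per-queue lock pair used above (the real versions also record
xmit_lock_owner to detect recursion):

    static inline int __netif_tx_trylock(struct netdev_queue *txq)
    {
            return spin_trylock(&txq->_xmit_lock);
    }

    static inline void __netif_tx_unlock(struct netdev_queue *txq)
    {
            spin_unlock(&txq->_xmit_lock);
    }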