Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--   net/sched/sch_generic.c   26
1 file changed, 15 insertions, 11 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fd2a6cadb11..7cf83b37459 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 		break;
 	}
 
-	if (ret && netif_tx_queue_stopped(txq))
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
 		ret = 0;
 
 	return ret;
@@ -505,7 +507,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -541,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -596,7 +598,7 @@ static void transition_one_qdisc(struct net_device *dev,
 	int *need_watchdog_p = _need_watchdog;
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (new_qdisc != &noqueue_qdisc)
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
 		*need_watchdog_p = 1;
 }
 
@@ -619,6 +621,7 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+	transition_one_qdisc(dev, &dev->rx_queue, NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -656,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
@@ -677,6 +680,7 @@ void dev_deactivate(struct net_device *dev)
 	bool running;
 
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -718,7 +722,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -731,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+		spinlock_t *root_lock = qdisc_lock(qdisc);
 
 		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
@@ -745,6 +749,6 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
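
The substance of the two qdisc_restart() hunks is that the transmit path now checks both the stopped (XOFF) and the frozen state of the selected TX queue: a packet is only handed to the driver when the queue is neither stopped nor frozen, and when nothing could be sent because the queue is in either state the function reports "nothing more to do" instead of an error, so the caller stops polling until the queue is woken or thawed. The standalone C sketch below models only that decision shape; the QSTATE_* bits, the txq_model struct and the fake_driver_xmit() helper are invented for illustration and are not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's queue state bits; names and
 * values are made up for this sketch. */
#define QSTATE_XOFF   (1u << 0)   /* driver stopped the queue */
#define QSTATE_FROZEN (1u << 1)   /* queue frozen, e.g. during reconfiguration */

struct txq_model {
	unsigned int state;
};

static bool txq_stopped(const struct txq_model *q)
{
	return (q->state & QSTATE_XOFF) != 0;
}

static bool txq_frozen(const struct txq_model *q)
{
	return (q->state & QSTATE_FROZEN) != 0;
}

static int fake_driver_xmit(bool busy)
{
	return busy ? 1 : 0;	/* 1 ~ "driver busy", 0 ~ "packet accepted" */
}

/* Models the shape of the two qdisc_restart() checks in the diff above. */
static int try_transmit(const struct txq_model *q, bool driver_busy)
{
	int ret = 1;	/* nonzero: caller should try again */

	/* First hunk: only call the driver when the queue is neither
	 * stopped nor frozen. */
	if (!txq_stopped(q) && !txq_frozen(q))
		ret = fake_driver_xmit(driver_busy);

	/* Second hunk: if nothing was sent and the queue is stopped or
	 * frozen, return 0 so the caller does not spin and instead waits
	 * for the queue to be woken/thawed. */
	if (ret && (txq_stopped(q) || txq_frozen(q)))
		ret = 0;

	return ret;
}

int main(void)
{
	struct txq_model running = { .state = 0 };
	struct txq_model frozen  = { .state = QSTATE_FROZEN };

	printf("running, driver ok   -> %d\n", try_transmit(&running, false)); /* 0 */
	printf("running, driver busy -> %d\n", try_transmit(&running, true));  /* 1: retry */
	printf("frozen queue         -> %d\n", try_transmit(&frozen, false));  /* 0: wait */
	return 0;
}

Compiled with any C99 compiler, the three cases print 0, 1 and 0, matching the distinction the patch draws between "driver busy, keep trying" and "queue unavailable, wait to be woken".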