-rw-r--r--  drivers/net/mv643xx_eth.c   | 146
-rw-r--r--  include/linux/mv643xx_eth.h |   3
2 files changed, 105 insertions, 44 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3c859185399..287155ea9ce 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -103,16 +103,16 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define INT_EXT_PHY			0x00010000
 #define INT_EXT_TX_ERROR_0		0x00000100
 #define INT_EXT_TX_0			0x00000001
-#define INT_EXT_TX			0x00000101
+#define INT_EXT_TX			0x0000ffff
 #define INT_MASK(p)			(0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
 #define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
 #define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
-#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
-#define TXQ_BW_TOKENS(p)		(0x0700 + ((p) << 10))
-#define TXQ_BW_CONF(p)			(0x0704 + ((p) << 10))
-#define TXQ_BW_WRR_CONF(p)		(0x0708 + ((p) << 10))
+#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
+#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
 #define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
 #define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
 #define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
@@ -303,6 +303,8 @@ struct rx_queue {
 };
 
 struct tx_queue {
+	int index;
+
 	int tx_ring_size;
 
 	int tx_desc_count;
@@ -347,7 +349,9 @@ struct mv643xx_eth_private {
 	int default_tx_ring_size;
 	unsigned long tx_desc_sram_addr;
 	int tx_desc_sram_size;
-	struct tx_queue txq[1];
+	u8 txq_mask;
+	int txq_primary;
+	struct tx_queue txq[8];
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	int tx_clean_threshold;
 #endif
@@ -374,7 +378,7 @@ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 
 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 {
-	return container_of(txq, struct mv643xx_eth_private, txq[0]);
+	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 }
 
 static void rxq_enable(struct rx_queue *rxq)
@@ -396,13 +400,13 @@ static void rxq_disable(struct rx_queue *rxq)
 static void txq_enable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
+	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
 }
 
 static void txq_disable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-	u8 mask = 1;
+	u8 mask = 1 << txq->index;
 
 	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
 	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
@@ -413,6 +417,12 @@ static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 
+	/*
+	 * netif_{stop,wake}_queue() flow control only applies to
+	 * the primary queue.
+	 */
+	BUG_ON(txq->index != mp->txq_primary);
+
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
 		netif_wake_queue(mp->dev);
 }
@@ -593,8 +603,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		txq_reclaim(mp->txq, 0);
 		mp->tx_clean_threshold = 0;
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_reclaim(mp->txq + i, 0);
 	}
 #endif
 
@@ -754,8 +766,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_queue *txq;
 	unsigned long flags;
 
-	BUG_ON(netif_queue_stopped(dev));
-
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
@@ -766,13 +776,15 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	txq = mp->txq;
+	txq = mp->txq + mp->txq_primary;
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
-		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
-		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&mp->lock, flags);
-		return NETDEV_TX_BUSY;
+		if (txq->index == mp->txq_primary && net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev,
+				   "primary tx queue full?!\n");
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	txq_submit_skb(txq, skb);
@@ -780,8 +792,13 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
-		netif_stop_queue(dev);
+	if (txq->index == mp->txq_primary) {
+		int entries_left;
+
+		entries_left = txq->tx_ring_size - txq->tx_desc_count;
+		if (entries_left < MAX_DESCS_PER_SKB)
+			netif_stop_queue(dev);
+	}
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -831,8 +848,8 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
 	if (bucket_size > 65535)
 		bucket_size = 65535;
 
-	wrl(mp, TXQ_BW_TOKENS(mp->port_num), token_rate << 14);
-	wrl(mp, TXQ_BW_CONF(mp->port_num),
+	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
+	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
 	    (bucket_size << 10) | token_rate);
 }
 
@@ -848,7 +865,7 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 
 	off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
 	val = rdl(mp, off);
-	val |= 1;
+	val |= 1 << txq->index;
 	wrl(mp, off, val);
 }
 
@@ -864,13 +881,13 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
 	off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
 	val = rdl(mp, off);
-	val &= ~1;
+	val &= ~(1 << txq->index);
 	wrl(mp, off, val);
 
 	/*
 	 * Configure WRR weight for this queue.
 	 */
-	off = TXQ_BW_WRR_CONF(mp->port_num);
+	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
 
 	val = rdl(mp, off);
 	val = (val & ~0xff) | (weight & 0xff);
@@ -1415,13 +1432,15 @@ static void rxq_deinit(struct rx_queue *rxq)
 	kfree(rxq->rx_skb);
 }
 
-static int txq_init(struct mv643xx_eth_private *mp)
+static int txq_init(struct mv643xx_eth_private *mp, int index)
 {
-	struct tx_queue *txq = mp->txq;
+	struct tx_queue *txq = mp->txq + index;
 	struct tx_desc *tx_desc;
 	int size;
 	int i;
 
+	txq->index = index;
+
 	txq->tx_ring_size = mp->default_tx_ring_size;
 
 	txq->tx_desc_count = 0;
@@ -1430,7 +1449,7 @@ static int txq_init(struct mv643xx_eth_private *mp)
 
 	size = txq->tx_ring_size * sizeof(struct tx_desc);
 
-	if (size <= mp->tx_desc_sram_size) {
+	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
 		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
 						mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1467,7 +1486,7 @@ static int txq_init(struct mv643xx_eth_private *mp)
 
 
 out_free:
-	if (size <= mp->tx_desc_sram_size)
+	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1539,7 +1558,8 @@ static void txq_deinit(struct tx_queue *txq)
 
 	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+	if (txq->index == mp->txq_primary &&
+	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, txq->tx_desc_area_size,
@@ -1578,12 +1598,20 @@ static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
 			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
 		else {
-			txq_disable(mp->txq);
+			int i;
+
+			for (i = 0; i < 8; i++)
+				if (mp->txq_mask & (1 << i))
+					txq_disable(mp->txq + i);
+
 			pscr_o &= ~SERIAL_PORT_ENABLE;
 			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
 			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
 			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
-			txq_enable(mp->txq);
+
+			for (i = 0; i < 8; i++)
+				if (mp->txq_mask & (1 << i))
+					txq_enable(mp->txq + i);
 		}
 	}
 }
@@ -1609,13 +1637,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
 		if (mii_link_ok(&mp->mii)) {
 			struct ethtool_cmd cmd;
+			int i;
 
 			mii_ethtool_gset(&mp->mii, &cmd);
 			update_pscr(mp, cmd.speed, cmd.duplex);
-			txq_enable(mp->txq);
+			for (i = 0; i < 8; i++)
+				if (mp->txq_mask & (1 << i))
+					txq_enable(mp->txq + i);
+
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
-				__txq_maybe_wake(mp->txq);
+				__txq_maybe_wake(mp->txq + mp->txq_primary);
 			}
 		} else if (netif_carrier_ok(dev)) {
 			netif_stop_queue(dev);
@@ -1643,9 +1675,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	}
 #endif
 
+	/*
+	 * TxBuffer or TxError set for any of the 8 queues?
+	 */
 	if (int_cause_ext & INT_EXT_TX) {
-		txq_reclaim(mp->txq, 0);
-		__txq_maybe_wake(mp->txq);
+		int i;
+
+		for (i = 0; i < 8; i++)
+			if (mp->txq_mask & (1 << i))
+				txq_reclaim(mp->txq + i, 0);
+
+		__txq_maybe_wake(mp->txq + mp->txq_primary);
 	}
 
 	return IRQ_HANDLED;
@@ -1696,11 +1736,14 @@ static void port_start(struct mv643xx_eth_private *mp)
 	 * Configure TX path and queues.
	 */
 	tx_set_rate(mp, 1000000000, 16777216);
-	for (i = 0; i < 1; i++) {
-		struct tx_queue *txq = mp->txq;
-		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
+	for (i = 0; i < 8; i++) {
+		struct tx_queue *txq = mp->txq + i;
+		int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
+		if ((mp->txq_mask & (1 << i)) == 0)
+			continue;
+
 		addr = (u32)txq->tx_desc_dma;
 		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
 		wrl(mp, off, addr);
@@ -1801,9 +1844,18 @@ static int mv643xx_eth_open(struct net_device *dev)
 		rxq_refill(mp->rxq + i);
 	}
 
-	err = txq_init(mp);
-	if (err)
-		goto out_free;
+	for (i = 0; i < 8; i++) {
+		if ((mp->txq_mask & (1 << i)) == 0)
+			continue;
+
+		err = txq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				if (mp->txq_mask & (1 << i))
+					txq_deinit(mp->txq + i);
+			goto out_free;
+		}
+	}
 
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
@@ -1840,8 +1892,9 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	for (i = 0; i < 8; i++) {
 		if (mp->rxq_mask & (1 << i))
 			rxq_disable(mp->rxq + i);
+		if (mp->txq_mask & (1 << i))
+			txq_disable(mp->txq + i);
 	}
-	txq_disable(mp->txq);
 
 	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
 		udelay(10);
@@ -1875,8 +1928,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	for (i = 0; i < 8; i++) {
 		if (mp->rxq_mask & (1 << i))
 			rxq_deinit(mp->rxq + i);
+		if (mp->txq_mask & (1 << i))
+			txq_deinit(mp->txq + i);
 	}
-	txq_deinit(mp->txq);
 
 	return 0;
 }
@@ -1928,7 +1982,7 @@ static void tx_timeout_task(struct work_struct *ugly)
 		port_reset(mp);
 		port_start(mp);
 
-		__txq_maybe_wake(mp->txq);
+		__txq_maybe_wake(mp->txq + mp->txq_primary);
 	}
 }
 
@@ -2139,6 +2193,12 @@ static void set_params(struct mv643xx_eth_private *mp,
 		mp->default_tx_ring_size = pd->tx_queue_size;
 	mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	mp->tx_desc_sram_size = pd->tx_sram_size;
+
+	if (pd->tx_queue_mask)
+		mp->txq_mask = pd->tx_queue_mask;
+	else
+		mp->txq_mask = 0x01;
+	mp->txq_primary = fls(mp->txq_mask) - 1;
 }
 
 static int phy_detect(struct mv643xx_eth_private *mp)
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 1afd7ba6d30..12078577aef 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -49,9 +49,10 @@ struct mv643xx_eth_platform_data {
 	int			duplex;
 
 	/*
-	 * Which RX queues to use.
+	 * Which RX/TX queues to use.
 	 */
 	int			rx_queue_mask;
+	int			tx_queue_mask;
 
 	/*
	 * Override default RX/TX queue sizes if nonzero.
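
A note on the reworked register macros at the top of the diff: each port owns a 1 KB register window (the "(p) << 10" term), and the patch threads a queue index into the per-queue TX registers. The current-descriptor pointers are packed 4 bytes apart, while the bandwidth-control registers sit 16 bytes apart, hence the differing shifts. A sketch of what the macros expand to for port 0 (offsets computed from the macros themselves, not checked against the datasheet):

	/* TXQ_CURRENT_DESC_PTR(p, q) = 0x06c0 + ((p) << 10) + ((q) << 2)
	 * TXQ_BW_TOKENS(p, q)        = 0x0700 + ((p) << 10) + ((q) << 4)
	 *
	 * For port 0:
	 *   TXQ_CURRENT_DESC_PTR(0, 0) == 0x06c0
	 *   TXQ_CURRENT_DESC_PTR(0, 7) == 0x06dc    (4-byte stride)
	 *   TXQ_BW_TOKENS(0, 0)        == 0x0700
	 *   TXQ_BW_TOKENS(0, 7)        == 0x0770    (16-byte stride)
	 */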
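The primary queue — the only one subject to netif_{stop,wake}_queue() flow control and the only one eligible for TX descriptor SRAM — is the highest-numbered enabled queue. fls() is the kernel's 1-based "find last set", so fls(mask) - 1 is that queue's index. A standalone sketch of the txq_mask/txq_primary logic from set_params(); the helper name is made up for illustration:

	#include <linux/bitops.h>	/* fls() */

	static int pick_primary_txq(u8 txq_mask)
	{
		/* Fall back to queue 0 when the platform supplies no mask,
		 * matching set_params() above.
		 */
		if (!txq_mask)
			txq_mask = 0x01;

		/* fls() is 1-based: fls(0x01) == 1, fls(0x83) == 8, so a
		 * mask of 0x83 (queues 0, 1 and 7) yields primary queue 7.
		 */
		return fls(txq_mask) - 1;
	}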
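On the platform side, a board file opts in through the new tx_queue_mask field. A hypothetical example — the variable name and mask values are illustrative, not taken from this patch:

	#include <linux/mv643xx_eth.h>

	/* Hypothetical board setup: RX queue 0 only, TX queues 0, 1 and 7.
	 * Queue 7 becomes the primary TX queue per the fls() rule above;
	 * leaving tx_queue_mask zero keeps the old single-queue behaviour.
	 */
	static struct mv643xx_eth_platform_data example_eth_data = {
		.port_number	= 0,
		.rx_queue_mask	= 0x01,
		.tx_queue_mask	= 0x83,
	};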