author     Mark Brown <broonie@opensource.wolfsonmicro.com>   2012-03-18 21:38:20 +0000
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>   2012-03-18 21:38:20 +0000
commit     c592c761a36286ab83451daa37a21c8558ea99c0 (patch)
tree       8abf57708fabf2a915320e9515b1ae2730ebf499 /drivers/net/ethernet/marvell
parent     63236f4038f7e14762114606d95769c32cf6cac1 (diff)
parent     33499df88b711725ee473ab5478e17efd21de4b0 (diff)
Merge remote-tracking branch 'regulator/topic/stub' into regulator-next
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--   drivers/net/ethernet/marvell/mv643xx_eth.c   14
-rw-r--r--   drivers/net/ethernet/marvell/skge.c           38
2 files changed, 39 insertions, 13 deletions
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9c049d2cb97..9edecfa1f0f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define INT_MASK			0x0068
 #define INT_MASK_EXT			0x006c
 #define TX_FIFO_URGENT_THRESHOLD	0x0074
+#define RX_DISCARD_FRAME_CNT		0x0084
+#define RX_OVERRUN_FRAME_CNT		0x0088
 #define TXQ_FIX_PRIO_CONF_MOVED	0x00dc
 #define TX_BW_RATE_MOVED		0x00e0
 #define TX_BW_MTU_MOVED			0x00e8
@@ -334,6 +336,9 @@ struct mib_counters {
 	u32 bad_crc_event;
 	u32 collision;
 	u32 late_collision;
+	/* Non MIB hardware counters */
+	u32 rx_discard;
+	u32 rx_overrun;
 };
 
 struct lro_counters {
@@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
 
 	for (i = 0; i < 0x80; i += 4)
 		mib_read(mp, i);
+
+	/* Clear non MIB hw counters also */
+	rdlp(mp, RX_DISCARD_FRAME_CNT);
+	rdlp(mp, RX_OVERRUN_FRAME_CNT);
 }
 
 static void mib_counters_update(struct mv643xx_eth_private *mp)
@@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
+	/* Non MIB hardware counters */
+	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
 	spin_unlock_bh(&mp->mib_counters_lock);
 
 	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
 	MIBSTAT(bad_crc_event),
 	MIBSTAT(collision),
 	MIBSTAT(late_collision),
+	MIBSTAT(rx_discard),
+	MIBSTAT(rx_overrun),
 	LROSTAT(lro_aggregated),
 	LROSTAT(lro_flushed),
 	LROSTAT(lro_no_desc),
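The mv643xx_eth hunks above accumulate two clear-on-read hardware counters (RX_DISCARD_FRAME_CNT, RX_OVERRUN_FRAME_CNT) into the software mib_counters each time the periodic MIB timer fires, and read them once at init so they start from zero. Below is a minimal user-space sketch of that accumulate-on-read pattern, not kernel code: read_hw_counter() is a stand-in for the driver's rdlp() register accessor and returns fake values, and the locking/timer machinery of the real driver is omitted.

#include <stdint.h>
#include <stdio.h>

#define RX_DISCARD_FRAME_CNT	0x0084
#define RX_OVERRUN_FRAME_CNT	0x0088

struct sw_counters {
	uint32_t rx_discard;
	uint32_t rx_overrun;
};

/* Stand-in for a clear-on-read MMIO register access (fake values here). */
static uint32_t read_hw_counter(uint32_t offset)
{
	return offset == RX_DISCARD_FRAME_CNT ? 3 : 1;
}

/* Mirrors mib_counters_update(): fold the hardware deltas into totals. */
static void counters_update(struct sw_counters *p)
{
	p->rx_discard += read_hw_counter(RX_DISCARD_FRAME_CNT);
	p->rx_overrun += read_hw_counter(RX_OVERRUN_FRAME_CNT);
}

int main(void)
{
	struct sw_counters c = { 0, 0 };

	counters_update(&c);	/* the real driver runs this from a 30 s timer */
	counters_update(&c);
	printf("rx_discard=%u rx_overrun=%u\n", c.rx_discard, c.rx_overrun);
	return 0;
}

Because the hardware clears the registers on every read, each poll only sees the delta since the previous poll; the software totals are what get exported through the MIBSTAT ethtool entries.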
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 18a87a57fc0..33947ac595c 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2576,6 +2576,7 @@ static int skge_up(struct net_device *dev)
 	}
 
 	/* Initialize MAC */
+	netif_carrier_off(dev);
 	spin_lock_bh(&hw->phy_lock);
 	if (is_genesis(hw))
 		genesis_mac_init(hw, port);
@@ -2797,6 +2798,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
 	wmb();
 
+	netdev_sent_queue(dev, skb->len);
+
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
 	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2816,11 +2819,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 
 /* Free resources associated with this reing element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
-			 u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+				 u32 control)
 {
-	struct pci_dev *pdev = skge->hw->pdev;
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2830,13 +2831,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
 			       dma_unmap_len(e, maplen),
 			       PCI_DMA_TODEVICE);
-
-	if (control & BMU_EOF) {
-		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
-			     "tx done slot %td\n", e - skge->tx_ring.start);
-
-		dev_kfree_skb(e->skb);
-	}
 }
 
 /* Free all buffers in transmit ring */
@@ -2847,10 +2841,15 @@ static void skge_tx_clean(struct net_device *dev)
 
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
-		skge_tx_free(skge, e, td->control);
+
+		skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+		if (td->control & BMU_EOF)
+			dev_kfree_skb(e->skb);
 		td->control = 0;
 	}
 
+	netdev_reset_queue(dev);
 	skge->tx_ring.to_clean = e;
 }
 
@@ -3111,6 +3110,7 @@ static void skge_tx_done(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
@@ -3120,8 +3120,20 @@ static void skge_tx_done(struct net_device *dev)
 		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, control);
+		skge_tx_unmap(skge->hw->pdev, e, control);
+
+		if (control & BMU_EOF) {
+			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+				     "tx done slot %td\n",
+				     e - skge->tx_ring.start);
+
+			pkts_compl++;
+			bytes_compl += e->skb->len;
+
+			dev_kfree_skb(e->skb);
+		}
 	}
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	skge->tx_ring.to_clean = e;
 
 	/* Can run lockless until we need to synchronize to restart queue. */
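The skge hunks wire the driver's TX path into byte queue limits (BQL): netdev_sent_queue() accounts each frame at submit time, netdev_completed_queue() reports the cleaned packets and bytes from skge_tx_done(), and netdev_reset_queue() drops the accounting when the ring is flushed in skge_tx_clean(). The skeleton below is a hedged sketch of that pattern for a hypothetical driver, not skge code: the foo_* names and the elided mapping/cleanup steps are placeholders, while the three netdev_*_queue() calls are the real BQL helpers from <linux/netdevice.h>.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Submit path: account the bytes before kicking the hardware. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...map buffers and fill the TX descriptor here... */
	netdev_sent_queue(dev, skb->len);
	/* ...ring the doorbell so the NIC starts transmitting... */
	return NETDEV_TX_OK;
}

/* Completion path: report how much was freed in this cleaning batch. */
static void foo_tx_done(struct net_device *dev, unsigned int pkts,
			unsigned int bytes)
{
	/* pkts/bytes are totals gathered while walking the cleaned ring */
	netdev_completed_queue(dev, pkts, bytes);
}

/* Ring flush (e.g. interface down): discard all outstanding accounting. */
static void foo_tx_clean(struct net_device *dev)
{
	/* ...unmap and free every pending descriptor here... */
	netdev_reset_queue(dev);
}

Keeping sent and completed byte counts paired this way lets the stack bound the amount of data sitting in the hardware ring, which is the point of BQL; resetting the queue on flush prevents the counters from going out of sync after skge_down()/skge_up().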