Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c |  14
-rw-r--r--  drivers/net/ethernet/marvell/skge.c        | 109
2 files changed, 97 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9c049d2cb97..9edecfa1f0f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define INT_MASK 0x0068
#define INT_MASK_EXT 0x006c
#define TX_FIFO_URGENT_THRESHOLD 0x0074
+#define RX_DISCARD_FRAME_CNT 0x0084
+#define RX_OVERRUN_FRAME_CNT 0x0088
#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
#define TX_BW_RATE_MOVED 0x00e0
#define TX_BW_MTU_MOVED 0x00e8
@@ -334,6 +336,9 @@ struct mib_counters {
u32 bad_crc_event;
u32 collision;
u32 late_collision;
+ /* Non-MIB hardware counters */
+ u32 rx_discard;
+ u32 rx_overrun;
};
struct lro_counters {
@@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
for (i = 0; i < 0x80; i += 4)
mib_read(mp, i);
+
+ /* Clear the non-MIB hw counters too */
+ rdlp(mp, RX_DISCARD_FRAME_CNT);
+ rdlp(mp, RX_OVERRUN_FRAME_CNT);
}
static void mib_counters_update(struct mv643xx_eth_private *mp)
@@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->bad_crc_event += mib_read(mp, 0x74);
p->collision += mib_read(mp, 0x78);
p->late_collision += mib_read(mp, 0x7c);
+ /* Non-MIB hardware counters */
+ p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+ p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
MIBSTAT(bad_crc_event),
MIBSTAT(collision),
MIBSTAT(late_collision),
+ MIBSTAT(rx_discard),
+ MIBSTAT(rx_overrun),
LROSTAT(lro_aggregated),
LROSTAT(lro_flushed),
LROSTAT(lro_no_desc),
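
Note on the mv643xx_eth hunks: RX_DISCARD_FRAME_CNT and RX_OVERRUN_FRAME_CNT sit outside the MIB block, and the read-and-discard in mib_counters_clear() indicates they are clear-on-read, so each read returns the count accumulated since the previous read. A condensed sketch of the resulting sample-and-accumulate pattern (identifiers come from the diff above; the helper name itself is hypothetical):

	/* Clear-on-read: each rdlp() returns the count since the last read,
	 * so adding it under the MIB lock keeps a monotonic software total
	 * that MIBSTAT() then exposes through ethtool -S. */
	static void rx_error_counters_update(struct mv643xx_eth_private *mp)
	{
		spin_lock_bh(&mp->mib_counters_lock);
		mp->mib_counters.rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
		mp->mib_counters.rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
		spin_unlock_bh(&mp->mib_counters_lock);
	}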
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 18a87a57fc0..edb9bda55d5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
}
/* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct pci_dev *pdev,
+ struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- u64 map;
+ dma_addr_t map;
- map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
+ map = pci_map_single(pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto mapping_error;
- rd->dma_lo = map;
- rd->dma_hi = map >> 32;
+ rd->dma_lo = lower_32_bits(map);
+ rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
+ return 0;
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
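
Note: the behavioral change in this hunk is that skge_rx_setup() can now fail. pci_map_single() may hand back an address that must be validated with pci_dma_mapping_error() before it is written into a descriptor, and lower_32_bits()/upper_32_bits() replace the open-coded shifts so the 64-bit split stays well-defined whatever the width of dma_addr_t. A condensed sketch of the pattern (descriptor field names as in the diff above):

	dma_addr_t map = pci_map_single(pdev, skb->data, bufsize,
					PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, map))
		return -EIO;		/* caller frees the skb and recovers */

	rd->dma_lo = lower_32_bits(map);	/* low 32 bits of the bus address */
	rd->dma_hi = upper_32_bits(map);	/* high bits; 0 with 32-bit DMA */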
/* Resume receiving using existing skb,
@@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+ if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev)
}
/* Initialize MAC */
+ netif_carrier_off(dev);
spin_lock_bh(&hw->phy_lock);
if (is_genesis(hw))
genesis_mac_init(hw, port);
@@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *td;
int i;
u32 control, len;
- u64 map;
+ dma_addr_t map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(hw->pdev, map))
+ goto mapping_error;
+
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = map;
- td->dma_hi = map >> 32;
+ td->dma_lo = lower_32_bits(map);
+ td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = map;
- tf->dma_hi = (u64) map >> 32;
+ tf->dma_lo = lower_32_bits(map);
+ tf->dma_hi = upper_32_bits(map);
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
}
return NETDEV_TX_OK;
+
+mapping_unwind:
+ /* Unwind any pages that were already mapped. */
+ if (e != skge->tx_ring.to_use) {
+ struct skge_element *u;
+
+ for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
+ pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
+ dma_unmap_len(u, maplen),
+ PCI_DMA_TODEVICE);
+ e = skge->tx_ring.to_use;
+ }
+ /* undo the mapping for the skb header */
+ pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+mapping_error:
+ /* On a mapping error, log a rate-limited warning and drop the packet. */
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
/* Free resources associated with this ring element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
- u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+ u32 control)
{
- struct pci_dev *pdev = skge->hw->pdev;
-
/* skb header vs. fragment */
if (control & BMU_STF)
pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
-
- if (control & BMU_EOF) {
- netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
- "tx done slot %td\n", e - skge->tx_ring.start);
-
- dev_kfree_skb(e->skb);
- }
}
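
Note: splitting the old skge_tx_free() into skge_tx_unmap() plus an explicit dev_kfree_skb() at each call site is what lets the new xmit error path reuse the unmap logic to unwind a partially mapped skb without touching skb bookkeeping. A condensed sketch of that unwind (mirroring the mapping_unwind label above; "ring" stands in for skge->tx_ring):

	/* Unmap the fragments consumed so far for this skb... */
	for (u = ring->to_use->next; u != e; u = u->next)
		pci_unmap_page(pdev, dma_unmap_addr(u, mapaddr),
			       dma_unmap_len(u, maplen), PCI_DMA_TODEVICE);
	/* ...then the linear header, which was mapped with pci_map_single(). */
	pci_unmap_single(pdev, dma_unmap_addr(ring->to_use, mapaddr),
			 dma_unmap_len(ring->to_use, maplen), PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);	/* the packet is dropped, not requeued */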
/* Free all buffers in transmit ring */
@@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
- skge_tx_free(skge, e, td->control);
+
+ skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+ if (td->control & BMU_EOF)
+ dev_kfree_skb(e->skb);
td->control = 0;
}
+ netdev_reset_queue(dev);
skge->tx_ring.to_clean = e;
}
@@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
+ if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
+ dev_kfree_skb(nskb);
+ goto resubmit;
+ }
+
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
- skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
@@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
@@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
if (control & BMU_OWN)
break;
- skge_tx_free(skge, e, control);
+ skge_tx_unmap(skge->hw->pdev, e, control);
+
+ if (control & BMU_EOF) {
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n",
+ e - skge->tx_ring.start);
+
+ pkts_compl++;
+ bytes_compl += e->skb->len;
+
+ dev_kfree_skb(e->skb);
+ }
}
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
skge->tx_ring.to_clean = e;
/* Can run lockless until we need to synchronize to restart queue. */
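
Note: the three queue-accounting calls added in this diff form the standard byte queue limits (BQL) contract, and they must stay balanced or the stack will throttle the tx queue incorrectly. A condensed sketch of the pairing as it lands in this driver:

	/* skge_xmit_frame(): account the packet once hardware owns it */
	netdev_sent_queue(dev, skb->len);

	/* skge_tx_done(): tally each completed EOF descriptor, then report */
	pkts_compl++;
	bytes_compl += e->skb->len;
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* skge_tx_clean(): the ring is flushed, so zero the BQL state too */
	netdev_reset_queue(dev);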