Diffstat (limited to 'drivers/net/greth.c')
-rw-r--r--  drivers/net/greth.c  221
1 file changed, 131 insertions(+), 90 deletions(-)
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09..fdb0333f5cb 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
 	dev_dbg(&dev->dev, " starting queue\n");
 	netif_start_queue(dev);
 
+	GRETH_REGSAVE(greth->regs->status, 0xFF);
+
 	napi_enable(&greth->napi);
 
 	greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
 
 	napi_disable(&greth->napi);
 
+	greth_disable_irqs(greth);
 	greth_disable_tx(greth);
+	greth_disable_rx(greth);
 
 	netif_stop_queue(dev);
 
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	wmb();
 
-	/* Enable the descriptors that we configured ... */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
+	wmb();
 
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
 				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
-	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
-		/* Clear interrupt status */
-		GRETH_REGORIN(greth->regs->status,
-			      status & (GRETH_INT_RX | GRETH_INT_TX));
+	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+	 * set regardless of whether IRQ is enabled or not. Especially
	 * important when shared IRQ.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
 
+	/* Handle rx and tx interrupts through poll */
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 		retval = IRQ_HANDLED;
 
 		/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-		netif_wake_queue(dev);
-	}
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+		netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 			}
 		}
 
-		/* Allocate new skb to replace current */
-		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
-		if (!bad && newskb) {
+		/* Allocate new skb to replace current, not needed if the
+		 * current skb can be reused */
+		if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 			skb_reserve(newskb, NET_IP_ALIGN);
 
 			dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 				if (net_ratelimit())
 					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 				dev_kfree_skb(newskb);
+				/* reusing current skb, so it is a drop */
 				dev->stats.rx_dropped++;
 			}
+		} else if (bad) {
+			/* Bad Frame transfer, the skb is reused */
+			dev->stats.rx_dropped++;
 		} else {
+			/* Failed Allocating a new skb. This is rather stupid
+			 * but the current "filled" skb is reused, as if
+			 * transfer failure. One could argue that RX descriptor
+			 * table handling should be divided into cleaning and
+			 * filling as the TX part of the driver
+			 */
 			if (net_ratelimit())
 				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+			/* reusing current skb, so it is a drop */
 			dev->stats.rx_dropped++;
 		}
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if (netif_queue_stopped(greth->netdev)) {
+		if (greth->gbit_mac)
+			greth_clean_tx_gbit(greth->netdev);
+		else
+			greth_clean_tx(greth->netdev);
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -949,15 +976,29 @@ restart_poll:
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
+
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+				      ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+			       GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr = eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
 	struct greth_private *greth = netdev_priv(dev);
 	struct phy_device *phydev = greth->phy;
 	unsigned long flags;
-
 	int status_change = 0;
+	u32 ctrl;
 
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	if (phydev->link) {
 
 		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
-			GRETH_REGANDIN(greth->regs->control,
-				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+			ctrl = GRETH_REGLOAD(greth->regs->control) &
+			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 
 			if (phydev->duplex)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
-
-			if (phydev->speed == SPEED_100) {
-
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
-			}
+				ctrl |= GRETH_CTRL_FD;
 
+			if (phydev->speed == SPEED_100)
+				ctrl |= GRETH_CTRL_SP;
 			else if (phydev->speed == SPEED_1000)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+				ctrl |= GRETH_CTRL_GB;
 
+			GRETH_REGSAVE(greth->regs->control, ctrl);
 			greth->speed = phydev->speed;
 			greth->duplex = phydev->duplex;
 			status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
 	{
 	 .name = "GAISLER_ETHMAC",
 	 },
+	{
+	 .name = "01_01d",
+	 },
 	{},
 };
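The heart of the patch is the locking scheme around IRQ arming. In the TX-busy path, xmit must not re-arm the TX completion IRQ while poll() is running with IRQs masked, or poll() could later rewrite the control register and the stopped queue would never wake. Below is a minimal sketch of that queue-stop path, not the driver itself: all foo_* names, the FOO_* register offsets and bits, and the register layout are hypothetical stand-ins, with readl()/writel() in place of the driver's GRETH_REGLOAD()/GRETH_REGSAVE() accessors.

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define FOO_CTRL	0x00	/* hypothetical control register offset */
#define FOO_STATUS	0x04	/* hypothetical status register offset */
#define FOO_INT_RX	0x01	/* RX event pending (latched in status) */
#define FOO_INT_TX	0x02	/* TX event pending (latched in status) */
#define FOO_RXI		0x04	/* RX IRQ enable bit in control */
#define FOO_TXI		0x08	/* TX IRQ enable bit in control */

struct foo_priv {
	spinlock_t lock;		/* protects the control register */
	void __iomem *regs;
	int tx_free;			/* free TX descriptors */
	struct napi_struct napi;
};

/* Reclaim completed TX descriptors; body elided in this sketch. */
static void foo_clean_tx(struct net_device *dev) { }

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	unsigned long flags;
	u32 ctrl;

	/* Reclaim finished descriptors before testing for space, as the
	 * patch now does unconditionally at the top of xmit. */
	foo_clean_tx(dev);

	if (fp->tx_free <= 0) {
		spin_lock_irqsave(&fp->lock, flags);
		ctrl = readl(fp->regs + FOO_CTRL);
		/* RXI still set means poll() is idle: arm the TX IRQ so
		 * completion wakes the queue. If poll() is active it will
		 * re-arm both IRQs itself when it sees the queue stopped. */
		if (ctrl & FOO_RXI)
			writel(ctrl | FOO_TXI, fp->regs + FOO_CTRL);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* ... descriptor setup and TX enable (under the same lock) elided ... */
	return NETDEV_TX_OK;
}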
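The rewritten interrupt handler is about shared-IRQ correctness: the hardware latches INT_TX/INT_RX in the status register even while the corresponding enable bit is clear, so a pending status bit alone does not prove this device raised the interrupt. A sketch of the test the patch adds, reusing the hypothetical foo_* definitions above:

#include <linux/interrupt.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *fp = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 status, ctrl;

	spin_lock(&fp->lock);

	status = readl(fp->regs + FOO_STATUS);
	ctrl = readl(fp->regs + FOO_CTRL);

	/* Claim the interrupt only when a pending event is matched by its
	 * enable bit; otherwise it belongs to another device on the line. */
	if (((status & FOO_INT_RX) && (ctrl & FOO_RXI)) ||
	    ((status & FOO_INT_TX) && (ctrl & FOO_TXI))) {
		/* Mask both sources and defer all work to poll(). */
		writel(ctrl & ~(FOO_RXI | FOO_TXI), fp->regs + FOO_CTRL);
		napi_schedule(&fp->napi);
		ret = IRQ_HANDLED;
	}

	spin_unlock(&fp->lock);
	return ret;
}

On a shared line, returning IRQ_NONE for foreign events is what lets the kernel go on to the other handlers registered on the same irq number.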
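Finally, because the handler above ignores events whose IRQ is masked, poll() must not complete NAPI if anything arrived between its last RX pass and re-arming the IRQ; such an event would otherwise be lost for good. That is why greth_poll() re-reads the status register under the lock after re-enabling interrupts, and loops instead of completing when the mask hits. The same pattern, again with the hypothetical names:

/* Receive up to 'limit' frames; body elided in this sketch. */
static int foo_rx(struct foo_priv *fp, int limit) { return 0; }

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	unsigned long flags;
	int work_done = 0;
	u32 ctrl;

restart:
	work_done += foo_rx(fp, budget - work_done);

	if (work_done < budget) {
		spin_lock_irqsave(&fp->lock, flags);

		/* Re-arm the RX IRQ first ... */
		ctrl = readl(fp->regs + FOO_CTRL);
		writel(ctrl | FOO_RXI, fp->regs + FOO_CTRL);

		/* ... then look again: an event that slipped in after the
		 * last foo_rx() pass must be polled now, not dropped. */
		if (readl(fp->regs + FOO_STATUS) & FOO_INT_RX) {
			writel(ctrl, fp->regs + FOO_CTRL);	/* mask again */
			spin_unlock_irqrestore(&fp->lock, flags);
			goto restart;
		}

		/* Quiet: safe to leave polled mode. The patch uses
		 * __napi_complete() here, which is legal because the
		 * spinlock keeps local IRQs disabled. */
		__napi_complete(napi);
		spin_unlock_irqrestore(&fp->lock, flags);
	}
	return work_done;
}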