author     Sandeep Gopalpet <Sandeep.Kumar@freescale.com>   2009-11-02 07:03:00 +0000
committer  David S. Miller <davem@davemloft.net>            2009-11-02 23:40:55 -0800
commit     a12f801d4b349bc57622584e70e45a4ccbef53b6
tree       1b081795127d9e47aa5bac516fededa736dfc394 /drivers/net/gianfar.c
parent     123b43e9716115302a0095e14f2c545811712715
gianfar: Add per queue structure support
This patch introduces per-TX-queue and per-RX-queue structures.
Previously, the members of these structures were embedded directly
in the gfar_private structure.

Going forward, supporting multiple queues requires refactoring the
gfar_private structure so that additional queues can be introduced
easily.
Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
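
The two queue structures this patch moves the fields into are declared in
drivers/net/gianfar.h, which is not part of this diff. As a reading aid for
the hunks below, here is a hedged sketch of what they plausibly contain,
reconstructed purely from the members that gianfar.c dereferences in this
patch; the types, the member order, and anything not referenced here are
assumptions, not the authoritative definitions (struct txbd8 and struct
rxbd8 are the gianfar buffer descriptors, also from gianfar.h):

```c
/*
 * Hedged sketch, NOT the authoritative gianfar.h definitions: only the
 * fields this patch actually touches in gianfar.c are shown, and the
 * member types/order are inferred from usage.
 */
#include <linux/netdevice.h>	/* struct net_device, struct napi_struct */
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/spinlock.h>
#include <linux/types.h>	/* dma_addr_t, u16 */

struct gfar_priv_tx_q {
	spinlock_t txlock;		/* was gfar_private->txlock */
	struct txbd8 *tx_bd_base;	/* first BD in the TX ring */
	struct txbd8 *cur_tx;		/* next free BD to fill */
	struct txbd8 *dirty_tx;		/* first BD awaiting cleanup */
	struct sk_buff **tx_skbuff;	/* skb backing each BD */
	u16 skb_curtx;			/* ring index of next skb slot */
	u16 skb_dirtytx;		/* ring index of next slot to clean */
	struct net_device *dev;		/* owning net_device */
	dma_addr_t tx_bd_dma_base;	/* bus address of the BD ring */
	unsigned int tx_ring_size;
	unsigned int num_txbdfree;	/* BDs still available */
	unsigned char txcoalescing;	/* TX interrupt coalescing enabled */
	unsigned long txic;		/* TX coalescing register value */
};

struct gfar_priv_rx_q {
	spinlock_t rxlock;		/* was gfar_private->rxlock */
	struct rxbd8 *rx_bd_base;	/* first BD in the RX ring */
	struct rxbd8 *cur_rx;		/* next BD to process */
	struct sk_buff **rx_skbuff;	/* skb backing each BD */
	u16 skb_currx;			/* ring index of next skb slot */
	struct net_device *dev;		/* owning net_device */
	struct napi_struct napi;	/* NAPI context, now one per RX queue */
	unsigned int rx_ring_size;
	unsigned char rxcoalescing;	/* RX interrupt coalescing enabled */
	unsigned long rxic;		/* RX coalescing register value */
};
```

The design point worth noticing is that the locks, the NAPI context, and the
interrupt-coalescing settings all travel with the queue object, so each queue
added later can be locked, polled, and tuned independently.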
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--  drivers/net/gianfar.c  384
1 file changed, 228 insertions(+), 156 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f7141865869..354b2b5936e 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 		int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -147,16 +148,16 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
-static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 			    dma_addr_t buf)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *dev = rx_queue->dev;
 	u32 lstatus;
 
 	bdp->bufPtr = buf;
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
 	eieio();
@@ -167,20 +168,25 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
 	int i;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
+	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
+	rx_queue->cur_rx = rx_queue->rx_bd_base;
+	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
+	rx_queue->skb_currx = 0;
 
 	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
+	txbdp = tx_queue->tx_bd_base;
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
 		txbdp->lstatus = 0;
 		txbdp->bufPtr = 0;
 		txbdp++;
@@ -190,12 +196,12 @@ static int gfar_init_bds(struct net_device *ndev)
 	txbdp--;
 	txbdp->status |= TXBD_WRAP;
 
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = priv->rx_skbuff[i];
+	rxbdp = rx_queue->rx_bd_base;
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		struct sk_buff *skb = rx_queue->rx_skbuff[i];
 
 		if (skb) {
-			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
+			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
 		} else {
 			skb = gfar_new_skb(ndev);
 			if (!skb) {
@@ -203,9 +209,9 @@ static int gfar_init_bds(struct net_device *ndev)
 				       ndev->name);
 				return -ENOMEM;
 			}
-			priv->rx_skbuff[i] = skb;
+			rx_queue->rx_skbuff[i] = skb;
 
-			gfar_new_rxbdp(ndev, rxbdp, skb);
+			gfar_new_rxbdp(rx_queue, rxbdp, skb);
 		}
 
 		rxbdp++;
@@ -220,12 +226,17 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	int i;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
-			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
-			&priv->tx_bd_dma_base, GFP_KERNEL);
+			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
+			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
+			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,36 +244,38 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 		return -ENOMEM;
 	}
 
-	priv->tx_bd_base = vaddr;
+	tx_queue->tx_bd_base = vaddr;
+	tx_queue->dev = ndev;
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
-	priv->rx_bd_base = vaddr;
+	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
+	rx_queue->rx_bd_base = vaddr;
+	rx_queue->dev = ndev;
 
 	/* Setup the skbuff rings */
-	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
-				  priv->tx_ring_size, GFP_KERNEL);
-	if (!priv->tx_skbuff) {
+	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				      tx_queue->tx_ring_size, GFP_KERNEL);
+	if (!tx_queue->tx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate tx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < tx_queue->tx_ring_size; i++)
+		tx_queue->tx_skbuff[i] = NULL;
 
-	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
-				  priv->rx_ring_size, GFP_KERNEL);
-	if (!priv->rx_skbuff) {
+	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				      rx_queue->rx_ring_size, GFP_KERNEL);
+	if (!rx_queue->rx_skbuff) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate rx_skbuff\n",
 			       ndev->name);
 		goto cleanup;
 	}
 
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
+	for (i = 0; i < rx_queue->rx_ring_size; i++)
+		rx_queue->rx_skbuff[i] = NULL;
 
 	if (gfar_init_bds(ndev))
 		goto cleanup;
@@ -278,24 +291,29 @@ static void gfar_init_mac(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	u32 rctrl = 0;
 	u32 tctrl = 0;
 	u32 attrs = 0;
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, priv->tx_bd_dma_base);
-	gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
-				  sizeof(*priv->tx_bd_base) *
-				  priv->tx_ring_size);
+	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
+	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
+				  sizeof(*tx_queue->tx_bd_base) *
+				  tx_queue->tx_ring_size);
 
 	/* Configure the coalescing support */
 	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
+	if (tx_queue->txcoalescing)
+		gfar_write(&regs->txic, tx_queue->txic);
 
 	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
+	if (rx_queue->rxcoalescing)
+		gfar_write(&regs->rxic, rx_queue->rxic);
 
 	if (priv->rx_csum_enable)
 		rctrl |= RCTRL_CHECKSUMMING;
@@ -414,7 +432,7 @@ static int gfar_of_init(struct net_device *dev)
 
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -519,8 +537,18 @@ static int gfar_probe(struct of_device *ofdev,
 	if (err)
 		goto regs_fail;
 
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
+	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
+				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+	if (!priv->tx_queue)
+		goto regs_fail;
+
+	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
+				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+	if (!priv->rx_queue)
+		goto rx_queue_fail;
+
+	spin_lock_init(&priv->tx_queue->txlock);
+	spin_lock_init(&priv->rx_queue->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
@@ -552,12 +580,13 @@ static int gfar_probe(struct of_device *ofdev,
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
-
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register for napi ...NAPI is for each rx_queue */
+	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -613,14 +642,16 @@ static int gfar_probe(struct of_device *ofdev,
 		dev->hard_header_len += GMAC_FCB_LEN;
 
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Initializing some of the rx/tx queue level parameters */
+	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
+	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
+	priv->tx_queue->txic = DEFAULT_TXIC;
+
+	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
+	priv->rx_queue->rxic = DEFAULT_RXIC;
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -666,12 +697,15 @@ static int gfar_probe(struct of_device *ofdev,
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	       dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
 
 	return 0;
 
 register_fail:
 	iounmap(priv->regs);
+	kfree(priv->rx_queue);
+rx_queue_fail:
+	kfree(priv->tx_queue);
 regs_fail:
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
@@ -705,6 +739,8 @@ static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
@@ -712,10 +748,12 @@ static int gfar_suspend(struct device *dev)
 		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
 	netif_device_detach(ndev);
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
 
 	if (netif_running(ndev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		spin_lock_irqsave(&tx_queue->txlock, flags);
+		spin_lock(&rx_queue->rxlock);
 
 		gfar_halt_nodisable(ndev);
@@ -729,10 +767,10 @@ static int gfar_suspend(struct device *dev)
 
 		gfar_write(&priv->regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock(&rx_queue->rxlock);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
-		napi_disable(&priv->napi);
+		napi_disable(&rx_queue->napi);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
@@ -754,6 +792,8 @@ static int gfar_resume(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
@@ -770,9 +810,11 @@ static int gfar_resume(struct device *dev)
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	tempval = gfar_read(&priv->regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
@@ -780,12 +822,12 @@ static int gfar_resume(struct device *dev)
 
 	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	napi_enable(&rx_queue->napi);
 
 	return 0;
 }
@@ -1060,18 +1102,23 @@ void gfar_halt(struct net_device *dev)
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
 	phy_stop(priv->phydev);
 
+	tx_queue = priv->tx_queue;
+	rx_queue = priv->rx_queue;
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -1092,46 +1139,50 @@ static void free_skb_resources(struct gfar_private *priv)
 	struct device *dev = &priv->ofdev->dev;
 	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i, j;
 
 	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	txbdp = tx_queue->tx_bd_base;
 
-	if (!priv->tx_skbuff)
+	if (!tx_queue->tx_skbuff)
 		goto skip_tx_skbuff;
 
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
 			continue;
 
 		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
 				 txbdp->length, DMA_TO_DEVICE);
 		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
 			txbdp++;
 			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
 				       txbdp->length, DMA_TO_DEVICE);
 		}
 		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
 	}
 
-	kfree(priv->tx_skbuff);
+	kfree(tx_queue->tx_skbuff);
 skip_tx_skbuff:
 
-	rxbdp = priv->rx_bd_base;
+	rx_queue = priv->rx_queue;
+	rxbdp = rx_queue->rx_bd_base;
 
-	if (!priv->rx_skbuff)
+	if (!rx_queue->rx_skbuff)
 		goto skip_rx_skbuff;
 
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		if (priv->rx_skbuff[i]) {
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
 			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
 					 priv->rx_buffer_size,
 					 DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-			priv->rx_skbuff[i] = NULL;
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
 		}
 
 		rxbdp->lstatus = 0;
@@ -1139,17 +1190,19 @@ skip_tx_skbuff:
 		rxbdp++;
 	}
 
-	kfree(priv->rx_skbuff);
+	kfree(rx_queue->rx_skbuff);
 skip_rx_skbuff:
 
-	dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-			       sizeof(*rxbdp) * priv->rx_ring_size,
-			  priv->tx_bd_base, priv->tx_bd_dma_base);
+	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
+			       sizeof(*rxbdp) * rx_queue->rx_ring_size,
+			  tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
@@ -1258,7 +1311,7 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	napi_enable(&priv->rx_queue->napi);
 
 	skb_queue_head_init(&priv->rx_recycle);
@@ -1269,14 +1322,14 @@ static int gfar_enet_open(struct net_device *dev)
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		napi_disable(&priv->rx_queue->napi);
 		return err;
 	}
@@ -1349,6 +1402,7 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
@@ -1357,7 +1411,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+	tx_queue = priv->tx_queue;
+	base = tx_queue->tx_bd_base;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1433,21 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
 		netif_stop_queue(dev);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1455,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
@@ -1442,7 +1497,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
@@ -1462,19 +1517,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
+	if (!tx_queue->num_txbdfree) {
 		netif_stop_queue(dev);
 
 		dev->stats.tx_fifo_errors++;
@@ -1484,7 +1539,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1494,7 +1549,7 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	napi_disable(&priv->rx_queue->napi);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1523,10 +1578,12 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	rx_queue = priv->rx_queue;
+	spin_lock_irqsave(&rx_queue->rxlock, flags);
 
 	priv->vlgrp = grp;
@@ -1560,7 +1617,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
@@ -1649,24 +1706,27 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue;
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
@@ -1698,29 +1758,29 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += frags + 1;
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
+	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
 		netif_wake_queue(dev);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
@@ -1730,14 +1790,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	rx_queue = priv->rx_queue;
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
+	spin_lock(&rx_queue->rxlock);
 
-	if (napi_schedule_prep(&priv->napi)) {
+	if (napi_schedule_prep(&rx_queue->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+		__napi_schedule(&rx_queue->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
@@ -1746,8 +1810,8 @@ static void gfar_schedule_cleanup(struct net_device *dev)
 		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock(&rx_queue->rxlock);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -1757,15 +1821,16 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	dma_addr_t buf;
 
 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
 			priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(dev, bdp, buf);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
@@ -1890,8 +1955,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  * until the budget/quota has been reached. Returns the number
  * of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1900,8 +1966,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1913,7 +1979,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,30 +2027,33 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 			}
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-			(priv->skb_currx + 1) &
-			RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+			(rx_queue->skb_currx + 1) &
+			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
+	struct gfar_priv_rx_q *rx_queue = container_of(napi,
+			struct gfar_priv_rx_q, napi);
+	struct net_device *dev = rx_queue->dev;
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	int tx_cleaned = 0;
 	int rx_cleaned = 0;
 	unsigned long flags;
@@ -1992,14 +2061,15 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
 	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+	tx_queue = priv->tx_queue;
 
 	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+		tx_cleaned = gfar_clean_tx_ring(tx_queue);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
+	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);
 
 	if (tx_cleaned)
 		return budget;
@@ -2014,13 +2084,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
+		if (likely(rx_queue->rxcoalescing)) {
 			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
+			gfar_write(&priv->regs->rxic, rx_queue->rxic);
 		}
-		if (likely(priv->txcoalescing)) {
+		if (likely(tx_queue->txcoalescing)) {
 			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
+			gfar_write(&priv->regs->txic, tx_queue->txic);
 		}
 	}
@@ -2087,12 +2157,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar __iomem *regs = priv->regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	tx_queue = priv->tx_queue;
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2158,7 +2230,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2172,7 +2244,7 @@ static void gfar_set_multi(struct net_device *dev)
 	struct gfar __iomem *regs = priv->regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2184,7 +2256,7 @@ static void gfar_set_multi(struct net_device *dev)
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2308,7 @@ static void gfar_set_multi(struct net_device *dev)
 			em_num = 0;
 		}
 
-		if(dev->mc_count == 0)
+		if (dev->mc_count == 0)
 			return;
 
 		/* Parse the list, and set the appropriate bits */