Diffstat (limited to 'drivers/net')
651 files changed, 23952 insertions, 8557 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index b6de7b1e2a5..3ea42ff1765 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c @@ -117,7 +117,6 @@ static const char version[] = #include <linux/fcntl.h> #include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/spinlock.h> diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c index 66e0323c183..b74a0eadbd6 100644 --- a/drivers/net/3c503.c +++ b/drivers/net/3c503.c @@ -380,6 +380,12 @@ out: return retval; } +static irqreturn_t el2_probe_interrupt(int irq, void *seen) +{ + *(bool *)seen = true; + return IRQ_HANDLED; +} + static int el2_open(struct net_device *dev) { @@ -391,23 +397,35 @@ el2_open(struct net_device *dev) outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ do { - retval = request_irq(*irqp, NULL, 0, "bogus", dev); - if (retval >= 0) { + bool seen; + + retval = request_irq(*irqp, el2_probe_interrupt, 0, + dev->name, &seen); + if (retval == -EBUSY) + continue; + if (retval < 0) + goto err_disable; + /* Twinkle the interrupt, and check if it's seen. */ - unsigned long cookie = probe_irq_on(); + seen = false; + smp_wmb(); outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); outb_p(0x00, E33G_IDCFR); - if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */ - ((retval = request_irq(dev->irq = *irqp, - eip_interrupt, 0, - dev->name, dev)) == 0)) - break; - } else { - if (retval != -EBUSY) - return retval; - } + msleep(1); + free_irq(*irqp, el2_probe_interrupt); + if (!seen) + continue; + + retval = request_irq(dev->irq = *irqp, eip_interrupt, 0, + dev->name, dev); + if (retval == -EBUSY) + continue; + if (retval < 0) + goto err_disable; } while (*++irqp); + if (*irqp == 0) { + err_disable: outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. 
*/ return -EAGAIN; } diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 04b5bba1902..8d584f5fd02 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -102,12 +102,12 @@ #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev) static void elp_set_mc_list(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; unsigned long flags; @@ -1231,8 +1231,9 @@ static void elp_set_mc_list(struct net_device *dev) adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; adapter->tx_pcb.length = 6 * netdev_mc_count(dev); i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy(adapter->tx_pcb.data.multicast[i++], + ha->addr, 6); adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send set_multicast command\n", dev->name); diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 77cf0901a44..b32b7a1710b 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -58,7 +58,6 @@ static const char version[] = #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/bitops.h> diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 902435a7646..ab9bb3c5200 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -76,7 +76,6 @@ #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/netdevice.h> diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 1e898b1c806..2e17837be54 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -65,7 +65,6 @@ static int max_interrupt_work = 20; #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index beed4fa10c6..8c70686d43a 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -99,7 +99,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/mca-legacy.h> @@ -625,7 +624,7 @@ static int init586(struct net_device *dev) volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs = netdev_mc_count(dev); ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); @@ -788,8 +787,9 @@ static int init586(struct net_device *dev) mc_cmd->cmd_link = 0xffff; mc_cmd->mc_cnt = num_addrs * 6; i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy((char *) mc_cmd->mc_list[i++], + ha->addr, 6); p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd = CUC_START; elmc_id_attn586(); diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 5c07b147ec9..38395dfa496 100644 --- a/drivers/net/3c527.c 
+++ b/drivers/net/3c527.c @@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) { unsigned char block[62]; unsigned char *bp; - struct dev_mc_list *dmc; + struct netdev_hw_addr *ha; if(retry==0) lp->mc_list_valid = 0; @@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) block[0]=netdev_mc_count(dev); bp=block+2; - netdev_for_each_mc_addr(dmc, dev) { - memcpy(bp, dmc->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(bp, ha->addr, 6); bp+=6; } if(mc32_command_nowait(dev, 2, block, diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index f965431f492..97525307ed2 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -77,7 +77,6 @@ static int vortex_debug = 1; #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/mii.h> @@ -90,6 +89,7 @@ static int vortex_debug = 1; #include <linux/eisa.h> #include <linux/bitops.h> #include <linux/jiffies.h> +#include <linux/gfp.h> #include <asm/irq.h> /* For nr_irqs only. */ #include <asm/io.h> #include <asm/uaccess.h> @@ -2129,8 +2129,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) int i; vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb->len-skb->data_len, PCI_DMA_TODEVICE)); - vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len); + skb_headlen(skb), PCI_DMA_TODEVICE)); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 4e9a5a20b6a..f09e5945149 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -26,7 +26,6 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/route.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/skbuff.h> #include <asm/irq.h> @@ -595,7 +594,7 @@ static void lance_load_multicast (struct net_device *dev) struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -610,8 +609,8 @@ static void lance_load_multicast (struct net_device *dev) ib->filter [1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 3d4406b1665..4e8d11cfe47 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -64,6 +64,7 @@ #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/ethtool.h> +#include <linux/gfp.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/crc32.h> @@ -909,11 +910,11 @@ static void __cp_set_rx_mode (struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index b4efc913978..d149624b4a4 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -110,6 +110,7 @@ #include <linux/crc32.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/gfp.h> #include <asm/irq.h> #define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION @@ -2502,11 +2503,11 @@ static void __set_rx_mode (struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; diff --git a/drivers/net/82596.c b/drivers/net/82596.c index f94d17d78bb..97c5fc019cd 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -45,7 +45,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> @@ -53,6 +52,7 @@ #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> @@ -1542,7 +1542,7 @@ static void set_multicast_list(struct net_device *dev) } if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *cp; struct mc_cmd *cmd; @@ -1552,10 +1552,10 @@ static void set_multicast_list(struct net_device *dev) cmd->cmd.command = CmdMulticastList; cmd->mc_cnt = cnt * ETH_ALEN; cp = cmd->mc_addrs; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!cnt--) break; - memcpy(cp, dmi->dmi_addr, ETH_ALEN); + memcpy(cp, ha->addr, ETH_ALEN); if (i596_debug > 1) DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", dev->name, cp)); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 7029cd50c45..dbd26f99215 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC This is the driver for the onboard card of the Xtensa XT2000 board. 
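The driver hunks above (3c505, 3c523, 3c527, 7990, 8139cp, 8139too, 82596) all make the same mechanical conversion: the multicast walk moves from struct dev_mc_list and dmi->dmi_addr to netdev_for_each_mc_addr() over struct netdev_hw_addr, reading ha->addr. A minimal sketch of a converted rx-filter handler follows; the function name and the final register write are hypothetical placeholders, and only the iteration and CRC hashing mirror the hunks:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 mc_filter[2] = { 0, 0 };

	/* Each list entry is now a struct netdev_hw_addr; the MAC bytes
	 * live in ha->addr (previously dmi->dmi_addr). */
	netdev_for_each_mc_addr(ha, dev) {
		int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}

	/* ... write mc_filter[0]/mc_filter[1] to the device's multicast
	 * hash registers here, as each driver above does in its own way ... */
}

The conversion also drops the separate list-node pointer entirely; netdev_mc_count(dev) and netdev_mc_empty(dev), already used by several of these drivers, report the list length without walking it.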
config MIPS_AU1X00_ENET - bool "MIPS AU1000 Ethernet support" + tristate "MIPS AU1000 Ethernet support" depends on SOC_AU1X00 select PHYLIB select CRC32 @@ -907,7 +907,7 @@ config SMC91X select CRC32 select MII depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ - MIPS || BLACKFIN || MN10300 + MIPS || BLACKFIN || MN10300 || COLDFIRE help This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it @@ -1916,6 +1916,7 @@ config FEC bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 + select PHYLIB help Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. @@ -2434,8 +2435,8 @@ config MV643XX_ETH config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" + depends on PPC || MICROBLAZE select PHYLIB - depends on PPC_DCR_NATIVE help This driver supports the Xilinx 10/100/1000 LocalLink TEMAC core used in Xilinx Spartan and Virtex FPGAs @@ -2582,6 +2583,31 @@ config CHELSIO_T3 To compile this driver as a module, choose M here: the module will be called cxgb3. +config CHELSIO_T4_DEPENDS + tristate + depends on PCI && INET + default y + +config CHELSIO_T4 + tristate "Chelsio Communications T4 Ethernet support" + depends on CHELSIO_T4_DEPENDS + select FW_LOADER + select MDIO + help + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://www.chelsio.com/support.htm>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module choose M here; the module + will be called cxgb4. + config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS && INET && SPARSEMEM @@ -2593,11 +2619,11 @@ config EHEA will be called ehea. config ENIC - tristate "Cisco 10G Ethernet NIC support" + tristate "Cisco VIC Ethernet NIC Support" depends on PCI && INET select INET_LRO help - This enables the support for the Cisco 10G Ethernet card. + This enables the support for the Cisco VIC Ethernet card. config IXGBE tristate "Intel(R) 10GbE PCI Express adapters support" @@ -2837,6 +2863,8 @@ source "drivers/ieee802154/Kconfig" source "drivers/s390/net/Kconfig" +source "drivers/net/caif/Kconfig" + config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN @@ -3155,17 +3183,12 @@ config PPPOATM config PPPOL2TP tristate "PPP over L2TP (EXPERIMENTAL)" - depends on EXPERIMENTAL && PPP && INET + depends on EXPERIMENTAL && L2TP && PPP help Support for PPP-over-L2TP socket family. L2TP is a protocol used by ISPs and enterprises to tunnel PPP traffic over UDP tunnels. L2TP is replacing PPTP for VPN uses. - This kernel component handles only L2TP data packets: a - userland daemon handles L2TP the control protocol (tunnel - and session setup). One such daemon is OpenL2TP - (http://openl2tp.sourceforge.net/). - config SLIP tristate "SLIP (serial line) support" ---help--- @@ -3252,15 +3275,14 @@ config NET_FC "SCSI generic support". config NETCONSOLE - tristate "Network console logging support (EXPERIMENTAL)" - depends on EXPERIMENTAL + tristate "Network console logging support" ---help--- If you want to log kernel messages over the network, enable this. 
See <file:Documentation/networking/netconsole.txt> for details. config NETCONSOLE_DYNAMIC - bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)" - depends on NETCONSOLE && SYSFS && EXPERIMENTAL + bool "Dynamic reconfiguration of logging targets" + depends on NETCONSOLE && SYSFS select CONFIGFS_FS help This option enables the ability to dynamically reconfigure target diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 478886234c2..ebf80b98306 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ +obj-$(CONFIG_CHELSIO_T4) += cxgb4/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BONDING) += bonding/ @@ -160,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o obj-$(CONFIG_PPPOE) += pppox.o pppoe.o -obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o +obj-$(CONFIG_PPPOL2TP) += pppox.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o @@ -290,5 +291,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_SFC) += sfc/ obj-$(CONFIG_WIMAX) += wimax/ +obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index bd4d829eca1..ce0a0b8787d 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -46,7 +46,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/crc32.h> @@ -603,7 +602,7 @@ static void lance_load_multicast (struct net_device *dev) struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -618,8 +617,8 @@ static void lance_load_multicast (struct net_device *dev) ib->filter [1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 4ae750ef1e1..1328eb9b841 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -67,6 +67,7 @@ #include <linux/highmem.h> #include <linux/sockios.h> #include <linux/firmware.h> +#include <linux/slab.h> #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #include <linux/if_vlan.h> @@ -660,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev) dma_addr_t mapping; ringp = &ap->skb->rx_std_skbuff[i]; - mapping = pci_unmap_addr(ringp, mapping); + mapping = dma_unmap_addr(ringp, mapping); pci_unmap_page(ap->pdev, mapping, ACE_STD_BUFSIZE, PCI_DMA_FROMDEVICE); @@ -680,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev) dma_addr_t mapping; ringp = &ap->skb->rx_mini_skbuff[i]; - mapping = pci_unmap_addr(ringp,mapping); + mapping = dma_unmap_addr(ringp,mapping); pci_unmap_page(ap->pdev, mapping, ACE_MINI_BUFSIZE, PCI_DMA_FROMDEVICE); @@ -699,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev) dma_addr_t mapping; ringp = &ap->skb->rx_jumbo_skbuff[i]; - mapping = pci_unmap_addr(ringp, mapping); + mapping = dma_unmap_addr(ringp, mapping); pci_unmap_page(ap->pdev, mapping, ACE_JUMBO_BUFSIZE, PCI_DMA_FROMDEVICE); @@ -1682,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs) ACE_STD_BUFSIZE, PCI_DMA_FROMDEVICE); ap->skb->rx_std_skbuff[idx].skb = skb; - pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], + dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], mapping, mapping); rd = &ap->rx_std_ring[idx]; @@ -1743,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs) ACE_MINI_BUFSIZE, PCI_DMA_FROMDEVICE); ap->skb->rx_mini_skbuff[idx].skb = skb; - pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], + dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], mapping, mapping); rd = &ap->rx_mini_ring[idx]; @@ -1799,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs) ACE_JUMBO_BUFSIZE, PCI_DMA_FROMDEVICE); ap->skb->rx_jumbo_skbuff[idx].skb = skb; - pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], + dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], mapping, mapping); rd = &ap->rx_jumbo_ring[idx]; @@ -2012,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) skb = rip->skb; rip->skb = NULL; pci_unmap_page(ap->pdev, - pci_unmap_addr(rip, mapping), + dma_unmap_addr(rip, mapping), mapsize, PCI_DMA_FROMDEVICE); skb_put(skb, retdesc->size); @@ -2077,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev, do { struct sk_buff *skb; - dma_addr_t mapping; struct tx_ring_info *info; info = ap->skb->tx_skbuff + idx; skb = info->skb; - mapping = pci_unmap_addr(info, mapping); - if (mapping) { - pci_unmap_page(ap->pdev, mapping, - pci_unmap_len(info, maplen), + if (dma_unmap_len(info, maplen)) { + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), PCI_DMA_TODEVICE); - pci_unmap_addr_set(info, mapping, 0); + dma_unmap_len_set(info, maplen, 0); } if (skb) { @@ -2376,14 +2375,12 @@ static int ace_close(struct net_device *dev) for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { struct sk_buff *skb; - dma_addr_t mapping; struct tx_ring_info *info; info = ap->skb->tx_skbuff + i; skb = info->skb; - mapping = pci_unmap_addr(info, mapping); - if (mapping) { + if (dma_unmap_len(info, maplen)) { if (ACE_IS_TIGON_I(ap)) { /* NB: TIGON_1 is special, tx_ring is in io space */ struct tx_desc __iomem *tx; @@ 
-2394,10 +2391,10 @@ static int ace_close(struct net_device *dev) } else memset(ap->tx_ring + i, 0, sizeof(struct tx_desc)); - pci_unmap_page(ap->pdev, mapping, - pci_unmap_len(info, maplen), + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), PCI_DMA_TODEVICE); - pci_unmap_addr_set(info, mapping, 0); + dma_unmap_len_set(info, maplen, 0); } if (skb) { dev_kfree_skb(skb); @@ -2432,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, info = ap->skb->tx_skbuff + idx; info->skb = tail; - pci_unmap_addr_set(info, mapping, mapping); - pci_unmap_len_set(info, maplen, skb->len); + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, skb->len); return mapping; } @@ -2552,8 +2549,8 @@ restart: } else { info->skb = NULL; } - pci_unmap_addr_set(info, mapping, mapping); - pci_unmap_len_set(info, maplen, frag->size); + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, frag->size); ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); } } diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h index 17079b927ff..0681da7e875 100644 --- a/drivers/net/acenic.h +++ b/drivers/net/acenic.h @@ -589,7 +589,7 @@ struct ace_info { struct ring_info { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; @@ -600,8 +600,8 @@ struct ring_info { */ struct tx_ring_info { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) - DECLARE_PCI_UNMAP_LEN(maplen) + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(maplen); }; diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index b8a59d255b4..97d71a96060 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -73,7 +73,6 @@ Revision History: #include <linux/kernel.h> #include <linux/types.h> #include <linux/compiler.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> @@ -1377,7 +1376,7 @@ list to the device. 
*/ static void amd8111e_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; struct amd8111e_priv *lp = netdev_priv(dev); u32 mc_filter[2] ; int bit_num; @@ -1408,8 +1407,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev) /* load all the multicast addresses in the logic filter */ lp->options |= OPTION_MULTICAST_ENABLE; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mc_ptr, dev) { - bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); } amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 73b38c204eb..6f8d6206b5c 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -56,7 +56,6 @@ static const char *version = #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index eb0448b03f4..79636ee3582 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -31,6 +31,7 @@ #include <linux/ip.h> #include <linux/atalk.h> #include <linux/if_arp.h> +#include <linux/slab.h> #include <net/route.h> #include <asm/uaccess.h> diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 8ea4ec705be..6af65b656f3 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -215,7 +215,6 @@ static int dma; #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> @@ -228,6 +227,7 @@ static int dma; #include <linux/timer.h> #include <linux/atalk.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/dma.h> diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 8ea9c7545c1..705e6ce2eb9 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -25,6 +25,7 @@ */ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <net/arp.h> diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index e6afab2455b..9efbbbae47c 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -28,7 +28,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/bootmem.h> diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 66bcbbb6bab..355797f7004 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -27,6 +27,7 @@ */ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <net/arp.h> diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index db08fc24047..0402da30a4e 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -30,7 +30,6 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/netdevice.h> diff --git 
a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index b68e1eb405f..2c712af6c26 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -31,7 +31,6 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/init.h> diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 0a74f21409c..c9e459400ff 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -29,7 +29,6 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/netdevice.h> diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 28dea518d55..4cb401813b7 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -29,7 +29,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/bootmem.h> diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 112e230cb13..f3b46f71e29 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -30,6 +30,7 @@ #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <asm/io.h> #include <linux/arcdevice.h> diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 06f8fa2f8f2..f81db4070a5 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -24,6 +24,7 @@ * ********************** */ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <net/arp.h> diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 745530651c4..b71431aae08 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -23,6 +23,7 @@ * * ********************** */ +#include <linux/gfp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/if_arp.h> diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 08d8be47dae..fa1a2354f5f 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -40,7 +40,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index f1f58c5e27b..a4b5b08276f 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev) } else if (dev->flags & IFF_ALLMULTI) { memset(multi_hash, 0xff, sizeof(multi_hash)); } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; memset(multi_hash, 0x00, sizeof(multi_hash)); - netdev_for_each_mc_addr(dmi, dev) - am79c961_mc_hash(dmi->dmi_addr, multi_hash); + netdev_for_each_mc_addr(ha, dev) + am79c961_mc_hash(ha->addr, multi_hash); } spin_lock_irqsave(&priv->chip_lock, flags); diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 8b23d5a175b..0adab30f626 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -27,6 +27,7 @@ #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/gfp.h> #include <asm/io.h> #include 
<asm/uaccess.h> @@ -556,14 +557,14 @@ static int hash_get_index(__u8 *addr) */ static void at91ether_sethashtable(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index bf72d57a0af..6995169d285 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -23,6 +23,7 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/slab.h> #include <mach/hardware.h> diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index f52f668c49b..4af235d41fd 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -33,7 +33,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 6e2ae1d06df..7800d7dfd29 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <mach/npe.h> #include <mach/qmgr.h> @@ -735,7 +736,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) static void eth_set_mcast_list(struct net_device *dev) { struct port *port = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u8 diffs[ETH_ALEN], *addr; int i; @@ -748,11 +749,11 @@ static void eth_set_mcast_list(struct net_device *dev) memset(diffs, 0, ETH_ALEN); addr = NULL; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!addr) - addr = mclist->dmi_addr; /* first MAC address */ + addr = ha->addr; /* first MAC address */ for (i = 0; i < ETH_ALEN; i++) - diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; + diffs[i] |= addr[i] ^ ha->addr[i]; } for (i = 0; i < ETH_ALEN; i++) { diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index a1d4188c430..7413a87e40f 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -30,6 +30,7 @@ #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/io.h> +#include <linux/slab.h> #include <asm/irq.h> @@ -331,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp, { u32 low, high; int i; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; i = 0; - netdev_for_each_mc_addr(dmi, ndev) { + netdev_for_each_mc_addr(ha, ndev) { /* Ran out of space in chip? 
*/ BUG_ON(i == KS8695_NR_ADDRESSES); - low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) | - (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]); - high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]); + low = (ha->addr[2] << 24) | (ha->addr[3] << 16) | + (ha->addr[4] << 8) | (ha->addr[5]); + high = (ha->addr[0] << 8) | (ha->addr[1]); ks8695_writereg(ksp, KS8695_AAL_(i), low); ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); @@ -449,11 +450,10 @@ ks8695_rx_irq(int irq, void *dev_id) } /** - * ks8695_rx - Receive packets called by NAPI poll method + * ks8695_rx - Receive packets called by NAPI poll method * @ksp: Private data for the KS8695 Ethernet - * @budget: The max packets would be receive + * @budget: Number of packets allowed to process */ - static int ks8695_rx(struct ks8695_priv *ksp, int budget) { struct net_device *ndev = ksp->ndev; @@ -461,7 +461,6 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) int buff_n; u32 flags; int pktlen; - int last_rx_processed = -1; int received = 0; buff_n = ksp->next_rx_desc_read; @@ -471,6 +470,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) cpu_to_le32(RDES_OWN)))) { rmb(); flags = le32_to_cpu(ksp->rx_ring[buff_n].status); + /* Found an SKB which we own, this means we * received a packet */ @@ -533,23 +533,18 @@ rx_failure: ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); rx_finished: received++; - /* And note this as processed so we can start - * from here next time - */ - last_rx_processed = buff_n; buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; - /*And note which RX descriptor we last did */ - if (likely(last_rx_processed != -1)) - ksp->next_rx_desc_read = - (last_rx_processed + 1) & - MAX_RX_DESC_MASK; } + + /* And note which RX descriptor we last did */ + ksp->next_rx_desc_read = buff_n; + /* And refill the buffers */ ks8695_refill_rxbuffers(ksp); - /* Kick the RX DMA engine, in case it became - * suspended */ + /* Kick the RX DMA engine, in case it became suspended */ ks8695_writereg(ksp, KS8695_DRSC, 0); + return received; } diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c index febd813c916..2491934b73e 100644 --- a/drivers/net/arm/w90p910_ether.c +++ b/drivers/net/arm/w90p910_ether.c @@ -18,6 +18,7 @@ #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/gfp.h> #define DRV_MODULE_NAME "w90p910-emc" #define DRV_MODULE_VERSION "0.1" @@ -743,7 +744,6 @@ static void netdev_rx(struct net_device *dev) return; } - skb->dev = dev; skb_reserve(skb, 2); skb_put(skb, length); skb_copy_to_linear_data(skb, data, length); diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 309843ab886..332f9806b78 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -47,7 +47,6 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/crc32.h> @@ -848,12 +847,12 @@ set_rx_mode(struct net_device *dev) memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit = - ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; + ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << bit); } outb(0x02, ioaddr + RX_MODE); /* Use normal mode. 
*/ diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 280cfff48b4..a8686bfec7a 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -53,7 +53,6 @@ static char version[] = "atarilance.c: v1.3 04/04/96 " #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/bitops.h> diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c index 61a0f2ff11e..32339243d61 100644 --- a/drivers/net/atl1c/atl1c_ethtool.c +++ b/drivers/net/atl1c/atl1c_ethtool.c @@ -22,6 +22,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> +#include <linux/slab.h> #include "atl1c.h" diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 50dc531a02d..3d7051135c3 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -354,7 +354,7 @@ static void atl1c_set_multi(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 mac_ctrl_data; u32 hash_value; @@ -377,8 +377,8 @@ static void atl1c_set_multi(struct net_device *netdev) AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1c_hash_mc_addr(hw, ha->addr); atl1c_hash_set(hw, hash_value); } } @@ -1817,7 +1817,6 @@ rrs_checked: atl1c_clean_rfd(rfd_ring, rrs, rfd_num); skb_put(skb, length - ETH_FCS_LEN); skb->protocol = eth_type_trans(skb, netdev); - skb->dev = netdev; atl1c_rx_checksum(adapter, skb, rrs); if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) { u16 vlan; diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index a76006c1bc6..ffd696ee7c8 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c @@ -22,6 +22,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> +#include <linux/slab.h> #include "atl1e.h" diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 73302ae468a..7dd33776de0 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 mac_ctrl_data = 0; u32 hash_value; @@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev) AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1e_hash_mc_addr(hw, ha->addr); atl1e_hash_set(hw, hash_value); } } @@ -1428,7 +1428,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, "Memory squeeze, deferring packet\n"); goto skip_pkt; } - skb->dev = netdev; memcpy(skb->data, (u8 *)(prrs + 1), packet_size); skb_put(skb, packet_size); skb->protocol = eth_type_trans(skb, netdev); @@ -1680,7 +1679,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; - u16 buf_len = 
skb->len - skb->data_len; + u16 buf_len = skb_headlen(skb); u16 map_len = 0; u16 mapped_len = 0; u16 hdr_len = 0; diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 9ba547069db..33448a09b47 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -84,7 +84,7 @@ #define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ - Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); +Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATLX_DRIVER_VERSION); @@ -2347,7 +2347,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - int len = skb->len; + int len; int tso; int count = 1; int ret_val; @@ -2359,7 +2359,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, unsigned int f; unsigned int proto_hdr_len; - len -= skb->data_len; + len = skb_headlen(skb); if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index 7061d7108f0..fee9cf6a5bd 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@ -39,6 +39,7 @@ #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> @@ -135,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; @@ -157,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev) ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl2_hash_mc_addr(hw, ha->addr); atl2_hash_set(hw, hash_value); } } @@ -421,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) netdev->stats.rx_dropped++; break; } - skb->dev = netdev; memcpy(skb->data, rxd->packet, rx_size); skb_put(skb, rx_size); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h index d918bbe621e..927e4de6474 100644 --- a/drivers/net/atlx/atl2.h +++ b/drivers/net/atlx/atl2.h @@ -442,7 +442,7 @@ struct atl2_hw { struct atl2_ring_header { /* pointer to the descriptor ring memory */ void *desc; - /* physical adress of the descriptor ring */ + /* physical address of the descriptor ring */ dma_addr_t dma; /* length of descriptor ring in bytes */ unsigned int size; diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c index 72f3306352e..f979ea2d6d3 100644 --- a/drivers/net/atlx/atlx.c +++ b/drivers/net/atlx/atlx.c @@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); struct atlx_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; @@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev) iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); /* compute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = 
atlx_hash_mc_addr(hw, ha->addr); atlx_hash_set(hw, hash_value); } } diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 6ad16205dc1..2bd1a5c0ec1 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c @@ -129,7 +129,6 @@ static int xcvr[NUM_UNITS]; /* The data transfer mode. */ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> @@ -883,11 +882,11 @@ static void set_rx_mode_8012(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); new_mode = CMR2h_Normal; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); } new_mode = CMR2h_Normal; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 4da191b87b0..7abb2c84ba5 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -75,14 +75,19 @@ static int au1000_debug = 5; static int au1000_debug = 3; #endif +#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + #define DRV_NAME "au1000_eth" -#define DRV_VERSION "1.6" +#define DRV_VERSION "1.7" #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>" #define DRV_DESC "Au1xxx on-chip Ethernet driver" MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); /* * Theory of operation @@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES]; * specific irq-map */ -static void enable_mac(struct net_device *dev, int force_reset) +static void au1000_enable_mac(struct net_device *dev, int force_reset) { unsigned long flags; struct au1000_private *aup = netdev_priv(dev); @@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) while (*mii_control_reg & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { - printk(KERN_ERR "%s: read_MII busy timeout!!\n", - dev->name); + netdev_err(dev, "read_MII busy timeout!!\n"); return -1; } } @@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) while (*mii_control_reg & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { - printk(KERN_ERR "%s: mdio_read busy timeout!!\n", - dev->name); + netdev_err(dev, "mdio_read busy timeout!!\n"); return -1; } } @@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr, while (*mii_control_reg & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { - printk(KERN_ERR "%s: mdio_write busy timeout!!\n", - dev->name); + netdev_err(dev, "mdio_write busy timeout!!\n"); return; } } @@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) * _NOT_ hold (e.g. 
when PHY is accessed through other MAC's MII bus) */ struct net_device *const dev = bus->priv; - enable_mac(dev, 0); /* make sure the MAC associated with this + au1000_enable_mac(dev, 0); /* make sure the MAC associated with this * mii_bus is enabled */ return au1000_mdio_read(dev, phy_addr, regnum); } @@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, { struct net_device *const dev = bus->priv; - enable_mac(dev, 0); /* make sure the MAC associated with this + au1000_enable_mac(dev, 0); /* make sure the MAC associated with this * mii_bus is enabled */ au1000_mdio_write(dev, phy_addr, regnum, value); return 0; @@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus) { struct net_device *const dev = bus->priv; - enable_mac(dev, 0); /* make sure the MAC associated with this + au1000_enable_mac(dev, 0); /* make sure the MAC associated with this * mii_bus is enabled */ return 0; } -static void hard_stop(struct net_device *dev) +static void au1000_hard_stop(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); - if (au1000_debug > 4) - printk(KERN_INFO "%s: hard stop\n", dev->name); + netif_dbg(aup, drv, dev, "hard stop\n"); aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); au_sync_delay(10); } -static void enable_rx_tx(struct net_device *dev) +static void au1000_enable_rx_tx(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); - if (au1000_debug > 4) - printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); + netif_dbg(aup, hw, dev, "enable_rx_tx\n"); aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); au_sync_delay(10); @@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev) spin_lock_irqsave(&aup->lock, flags); if (phydev->link && (aup->old_speed != phydev->speed)) { - // speed changed + /* speed changed */ - switch(phydev->speed) { + switch (phydev->speed) { case SPEED_10: case SPEED_100: break; default: - printk(KERN_WARNING - "%s: Speed (%d) is not 10/100 ???\n", - dev->name, phydev->speed); + netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", + phydev->speed); break; } @@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev) } if (phydev->link && (aup->old_duplex != phydev->duplex)) { - // duplex mode changed + /* duplex mode changed */ /* switching duplex mode requires to disable rx and tx! */ - hard_stop(dev); + au1000_hard_stop(dev); if (DUPLEX_FULL == phydev->duplex) aup->mac->control = ((aup->mac->control @@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev) | MAC_DISABLE_RX_OWN); au_sync_delay(1); - enable_rx_tx(dev); + au1000_enable_rx_tx(dev); aup->old_duplex = phydev->duplex; status_change = 1; } - if(phydev->link != aup->old_link) { - // link state changed + if (phydev->link != aup->old_link) { + /* link state changed */ if (!phydev->link) { /* link went down */ @@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev) if (status_change) { if (phydev->link) - printk(KERN_INFO "%s: link up (%d/%s)\n", - dev->name, phydev->speed, + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); else - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); } } -static int mii_probe (struct net_device *dev) +static int au1000_mii_probe (struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); struct phy_device *phydev = NULL; @@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev) if (aup->phy_addr) phydev = aup->mii_bus->phy_map[aup->phy_addr]; else - printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n", - dev->name); + netdev_info(dev, "using PHY-less setup\n"); return 0; } else { int phy_addr; @@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev) /* try harder to find a PHY */ if (!phydev && (aup->mac_id == 1)) { /* no PHY found, maybe we have a dual PHY? */ - printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, " + dev_info(&dev->dev, ": no PHY found on MAC1, " "let's see if it's attached to MAC0...\n"); /* find the first (lowest address) non-attached PHY on @@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev) } if (!phydev) { - printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name); + netdev_err(dev, "no PHY found\n"); return -1; } @@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev) 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + netdev_err(dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } @@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev) aup->old_duplex = -1; aup->phy_dev = phydev; - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + netdev_info(dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; @@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev) * has the virtual and dma address of a buffer suitable for * both, receive and transmit operations. */ -static db_dest_t *GetFreeDB(struct au1000_private *aup) +static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup) { db_dest_t *pDB; pDB = aup->pDBfree; @@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup) return pDB; } -void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) +void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) { db_dest_t *pDBfree = aup->pDBfree; if (pDBfree) @@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) aup->pDBfree = pDB; } -static void reset_mac_unlocked(struct net_device *dev) +static void au1000_reset_mac_unlocked(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); int i; - hard_stop(dev); + au1000_hard_stop(dev); *aup->enable = MAC_EN_CLOCK_ENABLE; au_sync_delay(2); @@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev) } -static void reset_mac(struct net_device *dev) +static void au1000_reset_mac(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); unsigned long flags; - if (au1000_debug > 4) - printk(KERN_INFO "%s: reset mac, aup %x\n", - dev->name, (unsigned)aup); + netif_dbg(aup, hw, dev, "reset mac, aup %x\n", + (unsigned)aup); spin_lock_irqsave(&aup->lock, flags); - reset_mac_unlocked (dev); + au1000_reset_mac_unlocked (dev); spin_unlock_irqrestore(&aup->lock, flags); } @@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev) * these are not descriptors sitting in memory. 
*/ static void -setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) +au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) { int i; @@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) info->regdump_len = 0; } +static void au1000_set_msglevel(struct net_device *dev, u32 value) +{ + struct au1000_private *aup = netdev_priv(dev); + aup->msg_enable = value; +} + +static u32 au1000_get_msglevel(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + return aup->msg_enable; +} + static const struct ethtool_ops au1000_ethtool_ops = { .get_settings = au1000_get_settings, .set_settings = au1000_set_settings, .get_drvinfo = au1000_get_drvinfo, .get_link = ethtool_op_get_link, + .get_msglevel = au1000_get_msglevel, + .set_msglevel = au1000_set_msglevel, }; @@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev) int i; u32 control; - if (au1000_debug > 4) - printk("%s: au1000_init\n", dev->name); + netif_dbg(aup, hw, dev, "au1000_init\n"); /* bring the device out of reset */ - enable_mac(dev, 1); + au1000_enable_mac(dev, 1); spin_lock_irqsave(&aup->lock, flags); @@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev) return 0; } -static inline void update_rx_stats(struct net_device *dev, u32 status) +static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) { struct net_device_stats *ps = &dev->stats; @@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status) ps->rx_crc_errors++; if (status & RX_COLL) ps->collisions++; - } - else + } else ps->rx_bytes += status & RX_FRAME_LEN_MASK; } @@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev) db_dest_t *pDB; u32 frmlen; - if (au1000_debug > 5) - printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head); + netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; while (buff_stat & RX_T_DONE) { status = prxd->status; pDB = aup->rx_db_inuse[aup->rx_head]; - update_rx_stats(dev, status); + au1000_update_rx_stats(dev, status); if (!(status & RX_ERROR)) { /* good frame */ @@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev) frmlen -= 4; /* Remove FCS */ skb = dev_alloc_skb(frmlen + 2); if (skb == NULL) { - printk(KERN_ERR - "%s: Memory squeeze, dropping packet.\n", - dev->name); + netdev_err(dev, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; continue; } @@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev) skb_put(skb, frmlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ - } - else { + } else { if (au1000_debug > 4) { if (status & RX_MISSED_FRAME) printk("rx miss\n"); @@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev) return 0; } -static void update_tx_stats(struct net_device *dev, u32 status) +static void au1000_update_tx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = netdev_priv(dev); struct net_device_stats *ps = &dev->stats; @@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status) ps->tx_errors++; ps->tx_aborted_errors++; } - } - else { + } else { ps->tx_errors++; ps->tx_aborted_errors++; if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) @@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev) ptxd = aup->tx_dma_ring[aup->tx_tail]; while (ptxd->buff_stat & TX_T_DONE) { - update_tx_stats(dev, ptxd->status); + 
au1000_update_tx_stats(dev, ptxd->status); ptxd->buff_stat &= ~TX_T_DONE; ptxd->len = 0; au_sync(); @@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev) int retval; struct au1000_private *aup = netdev_priv(dev); - if (au1000_debug > 4) - printk("%s: open: dev=%p\n", dev->name, dev); + netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); - if ((retval = request_irq(dev->irq, au1000_interrupt, 0, - dev->name, dev))) { - printk(KERN_ERR "%s: unable to get IRQ %d\n", - dev->name, dev->irq); + retval = request_irq(dev->irq, au1000_interrupt, 0, + dev->name, dev); + if (retval) { + netdev_err(dev, "unable to get IRQ %d\n", dev->irq); return retval; } - if ((retval = au1000_init(dev))) { - printk(KERN_ERR "%s: error in au1000_init\n", dev->name); + retval = au1000_init(dev); + if (retval) { + netdev_err(dev, "error in au1000_init\n"); free_irq(dev->irq, dev); return retval; } @@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev) netif_start_queue(dev); - if (au1000_debug > 4) - printk("%s: open: Initialization done.\n", dev->name); + netif_dbg(aup, drv, dev, "open: Initialization done.\n"); return 0; } @@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev) unsigned long flags; struct au1000_private *const aup = netdev_priv(dev); - if (au1000_debug > 4) - printk("%s: close: dev=%p\n", dev->name, dev); + netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); if (aup->phy_dev) phy_stop(aup->phy_dev); spin_lock_irqsave(&aup->lock, flags); - reset_mac_unlocked (dev); + au1000_reset_mac_unlocked (dev); /* stop the device */ netif_stop_queue(dev); @@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) db_dest_t *pDB; int i; - if (au1000_debug > 5) - printk("%s: tx: aup %x len=%d, data=%p, head %d\n", - dev->name, (unsigned)aup, skb->len, + netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", + (unsigned)aup, skb->len, skb->data, aup->tx_head); ptxd = aup->tx_dma_ring[aup->tx_head]; @@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); aup->tx_full = 1; return NETDEV_TX_BUSY; - } - else if (buff_stat & TX_T_DONE) { - update_tx_stats(dev, ptxd->status); + } else if (buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); ptxd->len = 0; } @@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) pDB = aup->tx_db_inuse[aup->tx_head]; skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); if (skb->len < ETH_ZLEN) { - for (i=skb->len; i<ETH_ZLEN; i++) { + for (i = skb->len; i < ETH_ZLEN; i++) { ((char *)pDB->vaddr)[i] = 0; } ptxd->len = ETH_ZLEN; - } - else + } else ptxd->len = skb->len; ps->tx_packets++; @@ -935,8 +934,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) */ static void au1000_tx_timeout(struct net_device *dev) { - printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev); - reset_mac(dev); + netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); + au1000_reset_mac(dev); au1000_init(dev); dev->trans_start = jiffies; netif_wake_queue(dev); @@ -946,8 +945,7 @@ static void au1000_multicast_list(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); - if (au1000_debug > 4) - printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags); + netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ aup->mac->control |= MAC_PROMISCUOUS; @@ -955,14 +953,14 @@ static void au1000_multicast_list(struct net_device *dev) netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { aup->mac->control |= MAC_PASS_ALL_MULTI; aup->mac->control &= ~MAC_PROMISCUOUS; - printk(KERN_INFO "%s: Pass all multicast\n", dev->name); + netdev_info(dev, "Pass all multicast\n"); } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[2]; /* Multicast hash filter */ mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) - set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, (long *)mc_filter); aup->mac->multi_hash_high = mc_filter[1]; aup->mac->multi_hash_low = mc_filter[0]; @@ -975,9 +973,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct au1000_private *aup = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; + if (!netif_running(dev)) + return -EINVAL; - if (!aup->phy_dev) return -EINVAL; // PHY not controllable + if (!aup->phy_dev) + return -EINVAL; /* PHY not controllable */ return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } @@ -996,7 +996,7 @@ static const struct net_device_ops au1000_netdev_ops = { static int __devinit au1000_probe(struct platform_device *pdev) { - static unsigned version_printed = 0; + static unsigned version_printed; struct au1000_private *aup = NULL; struct au1000_eth_platform_data *pd; struct net_device *dev = NULL; @@ -1007,40 +1007,40 @@ static int __devinit au1000_probe(struct platform_device *pdev) base = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!base) { - printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n"); + dev_err(&pdev->dev, "failed to retrieve base register\n"); err = -ENODEV; goto out; } macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!macen) { - printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n"); + dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); err = -ENODEV; goto out; } irq = platform_get_irq(pdev, 0); if (irq < 0) { - printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n"); + dev_err(&pdev->dev, "failed to retrieve IRQ\n"); err = -ENODEV; goto out; } if (!request_mem_region(base->start, resource_size(base), pdev->name)) { - printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n"); + dev_err(&pdev->dev, "failed to request memory region for base registers\n"); err = -ENXIO; goto out; } if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) { - printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n"); + dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); err = -ENXIO; goto err_request; } dev = alloc_etherdev(sizeof(struct au1000_private)); if (!dev) { - printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); + dev_err(&pdev->dev, "alloc_etherdev failed\n"); err = -ENOMEM; goto err_alloc; } @@ -1050,6 +1050,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup = netdev_priv(dev); spin_lock_init(&aup->lock); + aup->msg_enable = (au1000_debug < 4 ? 
AU1000_DEF_MSG_ENABLE : au1000_debug); /* Allocate the data buffers */ /* Snooping works fine with eth on all au1xxx */ @@ -1057,7 +1058,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) (NUM_TX_BUFFS + NUM_RX_BUFFS), &aup->dma_addr, 0); if (!aup->vaddr) { - printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n"); + dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; goto err_vaddr; } @@ -1065,7 +1066,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) /* aup->mac is the base address of the MAC's registers */ aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base)); if (!aup->mac) { - printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n"); + dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); err = -ENXIO; goto err_remap1; } @@ -1073,7 +1074,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) /* Setup some variables for quick register address access */ aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen)); if (!aup->enable) { - printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n"); + dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); err = -ENXIO; goto err_remap2; } @@ -1083,14 +1084,13 @@ static int __devinit au1000_probe(struct platform_device *pdev) if (prom_get_ethernet_addr(ethaddr) == 0) memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); else { - printk(KERN_INFO "%s: No MAC address found\n", - dev->name); + netdev_info(dev, "No MAC address found\n"); /* Use the hard coded MAC addresses */ } - setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); + au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); } else if (pdev->id == 1) - setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); + au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); /* * Assign to the Ethernet ports two consecutive MAC addresses @@ -1104,7 +1104,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) pd = pdev->dev.platform_data; if (!pd) { - printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n"); + dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n"); aup->phy1_search_mac0 = 1; } else { aup->phy_static_config = pd->phy_static_config; @@ -1116,7 +1116,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) } if (aup->phy_busid && aup->phy_busid > 0) { - printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII" + dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII" "bus not supported yet\n"); err = -ENODEV; goto err_mdiobus_alloc; @@ -1124,7 +1124,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup->mii_bus = mdiobus_alloc(); if (aup->mii_bus == NULL) { - printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n"); + dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); err = -ENOMEM; goto err_mdiobus_alloc; } @@ -1139,7 +1139,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) if (aup->mii_bus->irq == NULL) goto err_out; - for(i = 0; i < PHY_MAX_ADDR; ++i) + for (i = 0; i < PHY_MAX_ADDR; ++i) aup->mii_bus->irq[i] = PHY_POLL; /* if known, set corresponding PHY IRQs */ if (aup->phy_static_config) @@ -1148,11 +1148,11 @@ static int __devinit au1000_probe(struct platform_device *pdev) err = mdiobus_register(aup->mii_bus); if (err) { - printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n"); + dev_err(&pdev->dev, "failed to register MDIO bus\n"); goto 
err_mdiobus_reg; } - if (mii_probe(dev) != 0) + if (au1000_mii_probe(dev) != 0) goto err_out; pDBfree = NULL; @@ -1168,7 +1168,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup->pDBfree = pDBfree; for (i = 0; i < NUM_RX_DMA; i++) { - pDB = GetFreeDB(aup); + pDB = au1000_GetFreeDB(aup); if (!pDB) { goto err_out; } @@ -1176,7 +1176,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup->rx_db_inuse[i] = pDB; } for (i = 0; i < NUM_TX_DMA; i++) { - pDB = GetFreeDB(aup); + pDB = au1000_GetFreeDB(aup); if (!pDB) { goto err_out; } @@ -1195,17 +1195,16 @@ static int __devinit au1000_probe(struct platform_device *pdev) * The boot code uses the ethernet controller, so reset it to start * fresh. au1000_init() expects that the device is in reset state. */ - reset_mac(dev); + au1000_reset_mac(dev); err = register_netdev(dev); if (err) { - printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n", - dev->name); + netdev_err(dev, "Cannot register net device, aborting.\n"); goto err_out; } - printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n", - dev->name, (unsigned long)base->start, irq); + netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", + (unsigned long)base->start, irq); if (version_printed++ == 0) printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); @@ -1217,15 +1216,15 @@ err_out: /* here we should have a valid dev plus aup-> register addresses * so we can reset the mac properly.*/ - reset_mac(dev); + au1000_reset_mac(dev); for (i = 0; i < NUM_RX_DMA; i++) { if (aup->rx_db_inuse[i]) - ReleaseDB(aup, aup->rx_db_inuse[i]); + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); } for (i = 0; i < NUM_TX_DMA; i++) { if (aup->tx_db_inuse[i]) - ReleaseDB(aup, aup->tx_db_inuse[i]); + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); } err_mdiobus_reg: mdiobus_free(aup->mii_bus); @@ -1261,11 +1260,11 @@ static int __devexit au1000_remove(struct platform_device *pdev) for (i = 0; i < NUM_RX_DMA; i++) if (aup->rx_db_inuse[i]) - ReleaseDB(aup, aup->rx_db_inuse[i]); + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); for (i = 0; i < NUM_TX_DMA; i++) if (aup->tx_db_inuse[i]) - ReleaseDB(aup, aup->tx_db_inuse[i]); + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h index f9d29a29b8f..d06ec008fbf 100644 --- a/drivers/net/au1000_eth.h +++ b/drivers/net/au1000_eth.h @@ -35,7 +35,7 @@ #define NUM_TX_BUFFS 4 #define MAX_BUF_SIZE 2048 -#define ETH_TX_TIMEOUT HZ/4 +#define ETH_TX_TIMEOUT (HZ/4) #define MAC_MIN_PKT_SIZE 64 #define MULTICAST_FILTER_LIMIT 64 @@ -125,4 +125,6 @@ struct au1000_private { dma_addr_t dma_addr; /* dma address of rx/tx buffers */ spinlock_t lock; /* Serialise access to device */ + + u32 msg_enable; }; diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 1dd4403247c..b718dc60afc 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -25,6 +25,7 @@ #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/eeprom_93cx6.h> +#include <linux/slab.h> #include <net/ax88796.h> diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 332c6035628..45827219699 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -27,6 +27,7 @@ #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/ssb/ssb.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -1680,15 +1681,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev) static int 
__b44_load_mcast(struct b44 *bp, struct net_device *dev) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i, num_ents; num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); i = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i == num_ents) break; - __b44_cam_write(bp, mclist->dmi_addr, i++ + 1); + __b44_cam_write(bp, ha->addr, i++ + 1); } return i+1; } diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c index 8cdcab7655c..9a8bdea4a8e 100644 --- a/drivers/net/bcm63xx_enet.c +++ b/drivers/net/bcm63xx_enet.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/clk.h> #include <linux/etherdevice.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/crc32.h> @@ -340,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) } skb_put(skb, len); - skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); priv->stats.rx_packets++; priv->stats.rx_bytes += len; - dev->last_rx = jiffies; netif_receive_skb(skb); } while (--budget > 0); @@ -604,7 +603,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) static void bcm_enet_set_multicast_list(struct net_device *dev) { struct bcm_enet_priv *priv; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 val; int i; @@ -632,14 +631,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev) } i = 0; - netdev_for_each_mc_addr(mc_list, dev) { + netdev_for_each_mc_addr(ha, dev) { u8 *dmi_addr; u32 tmp; if (i == 3) break; /* update perfect match registers */ - dmi_addr = mc_list->dmi_addr; + dmi_addr = ha->addr; tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | (dmi_addr[4] << 8) | dmi_addr[5]; enet_writel(priv, tmp, ENET_PML_REG(i + 1)); @@ -959,7 +958,9 @@ static int bcm_enet_open(struct net_device *dev) /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ wmb(); - enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG); + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_ENABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, ENETDMA_CHANCFG_REG(priv->rx_chan)); diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 8f075255368..373c1a56347 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -29,6 +29,7 @@ #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/firmware.h> +#include <linux/slab.h> #include "be_hw.h" @@ -83,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev) #define FW_VER_LEN 32 +#define BE_MAX_VF 32 + struct be_dma_mem { void *va; dma_addr_t dma; @@ -206,7 +209,7 @@ struct be_tx_obj { /* Struct to remember the pages posted for rx frags */ struct be_rx_page_info { struct page *page; - dma_addr_t bus; + DEFINE_DMA_UNMAP_ADDR(bus); u16 page_offset; bool last_page_user; }; @@ -280,8 +283,15 @@ struct be_adapter { u8 port_type; u8 transceiver; u8 generation; /* BladeEngine ASIC generation */ + + bool sriov_enabled; + u32 vf_if_handle[BE_MAX_VF]; + u32 vf_pmac_id[BE_MAX_VF]; + u8 base_eq_id; }; +#define be_physfn(adapter) (!adapter->pdev->is_virtfn) + /* BladeEngine Generation numbers */ #define BE_GEN2 2 #define BE_GEN3 3 diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index c59215361f4..e79bf8b9af3 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -673,7 +673,7 @@ int be_cmd_mccq_create(struct be_adapter 
*adapter, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE, sizeof(*req)); - req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, @@ -843,7 +843,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, * Uses mbox */ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, - u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) + u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id, + u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_create *req; @@ -860,6 +861,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); + req->hdr.domain = domain; req->capability_flags = cpu_to_le32(cap_flags); req->enable_flags = cpu_to_le32(en_flags); req->pmac_invalid = pmac_invalid; @@ -1111,6 +1113,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_PROMISCUOUS, sizeof(*req)); + /* In FW versions X.102.149/X.101.487 and later, + * the port setting associated only with the + * issuing pci function will take effect + */ if (port_num) req->port1_promiscuous = en; else @@ -1157,13 +1163,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, req->interface_id = if_id; if (netdev) { int i; - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; req->num_mac = cpu_to_le16(netdev_mc_count(netdev)); i = 0; - netdev_for_each_mc_addr(mc, netdev) - memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); } else { req->promiscuous = 1; } @@ -1464,8 +1470,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); - req->params.offset = offset; - req->params.data_buf_size = 0x4; + req->params.offset = cpu_to_le32(offset); + req->params.data_buf_size = cpu_to_le32(0x4); status = be_mcc_notify_wait(adapter); if (!status) diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index cce61f9a371..763dc199e33 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, bool pmac_invalid, - u32 *if_handle, u32 *pmac_id); + u32 *if_handle, u32 *pmac_id, u32 domain); extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9560d48944a..d488d52d710 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -490,13 +490,13 @@ be_test_ddr_dma(struct be_adapter *adapter) { int ret, i; struct be_dma_mem ddrdma_cmd; - u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5}; + u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL}; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.va = 
pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, &ddrdma_cmd.dma); if (!ddrdma_cmd.va) { - dev_err(&adapter->pdev->dev, "Memory allocation failure \n"); + dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 2d4a4b82763..063026de495 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -99,6 +99,9 @@ /* Number of entries posted */ #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ +/********** SRIOV VF PCICFG OFFSET ********/ +#define SRIOV_VF_PCICFG_OFFSET (4096) + /* Flashrom related descriptors */ #define IMAGE_TYPE_FIRMWARE 160 #define IMAGE_TYPE_BOOTCODE 224 diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 43e8032f923..fa10f13242c 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation"); MODULE_LICENSE("GPL"); static unsigned int rx_frag_size = 2048; +static unsigned int num_vfs; module_param(rx_frag_size, uint, S_IRUGO); +module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); +MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, @@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + /* MAC addr configuration will be done in hardware for VFs + * by their corresponding PFs. Just copy to netdev addr here + */ + if (!be_physfn(adapter)) + goto netdev_addr; + status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); if (status) return status; status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, adapter->if_handle, &adapter->pmac_id); +netdev_addr: if (!status) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); @@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); } +static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, + bool unmap_single) +{ + dma_addr_t dma; + + be_dws_le_to_cpu(wrb, sizeof(*wrb)); + + dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; + if (wrb->frag_len) { + if (unmap_single) + pci_unmap_single(pdev, dma, wrb->frag_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(pdev, dma, wrb->frag_len, + PCI_DMA_TODEVICE); + } +} static int make_tx_wrbs(struct be_adapter *adapter, struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) { - u64 busaddr; - u32 i, copied = 0; + dma_addr_t busaddr; + int i, copied = 0; struct pci_dev *pdev = adapter->pdev; struct sk_buff *first_skb = skb; struct be_queue_info *txq = &adapter->tx_obj.q; struct be_eth_wrb *wrb; struct be_eth_hdr_wrb *hdr; + bool map_single = false; + u16 map_head; hdr = queue_head_node(txq); - atomic_add(wrb_cnt, &txq->used); queue_head_inc(txq); + map_head = txq->head; if (skb->len > skb->data_len) { - int len = skb->len - skb->data_len; + int len = skb_headlen(skb); busaddr = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, busaddr)) + goto dma_err; + map_single = true; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, len); be_dws_cpu_to_le(wrb, sizeof(*wrb)); @@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter, busaddr = pci_map_page(pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, 
busaddr)) + goto dma_err; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, frag->size); be_dws_cpu_to_le(wrb, sizeof(*wrb)); @@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter, be_dws_cpu_to_le(hdr, sizeof(*hdr)); return copied; +dma_err: + txq->head = map_head; + while (copied) { + wrb = queue_head_node(txq); + unmap_tx_frag(pdev, wrb, map_single); + map_single = false; + copied -= wrb->frag_len; + queue_head_inc(txq); + } + return 0; } static netdev_tx_t be_xmit(struct sk_buff *skb, @@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, * *BEFORE* ringing the tx doorbell, so that we serialze the * tx compls of the current transmit which'll wake up the queue */ + atomic_add(wrb_cnt, &txq->used); if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) { netif_stop_queue(netdev); @@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + if (!be_physfn(adapter)) + return; + adapter->vlan_tag[vid] = 1; adapter->vlans_added++; if (adapter->vlans_added <= (adapter->max_vlans + 1)) @@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + if (!be_physfn(adapter)) + return; + adapter->vlan_tag[vid] = 0; vlan_group_set_device(adapter->vlan_grp, vid, NULL); adapter->vlans_added--; @@ -588,6 +639,28 @@ done: return; } +static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!adapter->sriov_enabled) + return -EPERM; + + if (!is_valid_ether_addr(mac) || (vf >= num_vfs)) + return -EINVAL; + + status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf], + adapter->vf_pmac_id[vf]); + + status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf], + &adapter->vf_pmac_id[vf]); + if (!status) + dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", + mac, vf); + return status; +} + static void be_rx_rate_update(struct be_adapter *adapter) { struct be_drvr_stats *stats = drvr_stats(adapter); @@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx) BUG_ON(!rx_page_info->page); if (rx_page_info->last_page_user) { - pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), + pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), adapter->big_page_size, PCI_DMA_FROMDEVICE); rx_page_info->last_page_user = false; } @@ -791,7 +864,6 @@ static void be_rx_compl_process(struct be_adapter *adapter, skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, adapter->netdev); - skb->dev = adapter->netdev; vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); @@ -807,7 +879,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); } else { netif_receive_skb(skb); @@ -884,7 +956,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, napi_gro_frags(&eq_obj->napi); } else { vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); if (!adapter->vlan_grp || adapter->vlans_added == 0) return; @@ -959,7 +1031,7 @@ static void be_post_rx_frags(struct be_adapter *adapter) } page_offset = page_info->page_offset; page_info->page = pagep; - 
pci_unmap_addr_set(page_info, bus, page_dmaaddr); + dma_unmap_addr_set(page_info, bus, page_dmaaddr); frag_dmaaddr = page_dmaaddr + page_info->page_offset; rxd = queue_head_node(rxq); @@ -1012,35 +1084,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) struct be_eth_wrb *wrb; struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; struct sk_buff *sent_skb; - u64 busaddr; - u16 cur_index, num_wrbs = 0; + u16 cur_index, num_wrbs = 1; /* account for hdr wrb */ + bool unmap_skb_hdr = true; - cur_index = txq->tail; - sent_skb = sent_skbs[cur_index]; + sent_skb = sent_skbs[txq->tail]; BUG_ON(!sent_skb); - sent_skbs[cur_index] = NULL; - wrb = queue_tail_node(txq); - be_dws_le_to_cpu(wrb, sizeof(*wrb)); - busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; - if (busaddr != 0) { - pci_unmap_single(adapter->pdev, busaddr, - wrb->frag_len, PCI_DMA_TODEVICE); - } - num_wrbs++; + sent_skbs[txq->tail] = NULL; + + /* skip header wrb */ queue_tail_inc(txq); - while (cur_index != last_index) { + do { cur_index = txq->tail; wrb = queue_tail_node(txq); - be_dws_le_to_cpu(wrb, sizeof(*wrb)); - busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; - if (busaddr != 0) { - pci_unmap_page(adapter->pdev, busaddr, - wrb->frag_len, PCI_DMA_TODEVICE); - } + unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && + skb_headlen(sent_skb))); + unmap_skb_hdr = false; + num_wrbs++; queue_tail_inc(txq); - } + } while (cur_index != last_index); atomic_sub(num_wrbs, &txq->used); @@ -1255,6 +1318,8 @@ static int be_tx_queues_create(struct be_adapter *adapter) /* Ask BE to create Tx Event queue */ if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) goto tx_eq_free; + adapter->base_eq_id = adapter->tx_eq.q.id; + /* Alloc TX eth compl queue */ cq = &adapter->tx_obj.cq; if (be_queue_alloc(adapter, cq, TX_CQ_LEN, @@ -1382,7 +1447,7 @@ rx_eq_free: /* There are 8 evt ids per func. Retruns the evt id's bit number */ static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) { - return eq_id % 8; + return eq_id - adapter->base_eq_id; } static irqreturn_t be_intx(int irq, void *dev) @@ -1560,6 +1625,28 @@ static void be_msix_enable(struct be_adapter *adapter) return; } +static void be_sriov_enable(struct be_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + int status; + if (be_physfn(adapter) && num_vfs) { + status = pci_enable_sriov(adapter->pdev, num_vfs); + adapter->sriov_enabled = status ? 
false : true; + } +#endif + return; +} + +static void be_sriov_disable(struct be_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + if (adapter->sriov_enabled) { + pci_disable_sriov(adapter->pdev); + adapter->sriov_enabled = false; + } +#endif +} + static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) { return adapter->msix_entries[ @@ -1617,6 +1704,9 @@ static int be_irq_register(struct be_adapter *adapter) status = be_msix_register(adapter); if (status == 0) goto done; + /* INTx is not supported for VF */ + if (!be_physfn(adapter)) + return status; } /* INTx */ @@ -1690,14 +1780,17 @@ static int be_open(struct net_device *netdev) goto ret_sts; be_link_status_update(adapter, link_up); - status = be_vid_config(adapter); + if (be_physfn(adapter)) + status = be_vid_config(adapter); if (status) goto ret_sts; - status = be_cmd_set_flow_control(adapter, - adapter->tx_fc, adapter->rx_fc); - if (status) - goto ret_sts; + if (be_physfn(adapter)) { + status = be_cmd_set_flow_control(adapter, + adapter->tx_fc, adapter->rx_fc); + if (status) + goto ret_sts; + } schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); ret_sts: @@ -1723,7 +1816,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); if (status) { dev_err(&adapter->pdev->dev, - "Could not enable Wake-on-lan \n"); + "Could not enable Wake-on-lan\n"); pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); return status; @@ -1745,22 +1838,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) static int be_setup(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; - u32 cap_flags, en_flags; + u32 cap_flags, en_flags, vf = 0; int status; + u8 mac[ETH_ALEN]; - cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MCAST_PROMISCUOUS | - BE_IF_FLAGS_PROMISCUOUS | - BE_IF_FLAGS_PASS_L3L4_ERRORS; - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_PASS_L3L4_ERRORS; + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; + + if (be_physfn(adapter)) { + cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | + BE_IF_FLAGS_PROMISCUOUS | + BE_IF_FLAGS_PASS_L3L4_ERRORS; + en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; + } status = be_cmd_if_create(adapter, cap_flags, en_flags, netdev->dev_addr, false/* pmac_invalid */, - &adapter->if_handle, &adapter->pmac_id); + &adapter->if_handle, &adapter->pmac_id, 0); if (status != 0) goto do_none; + if (be_physfn(adapter)) { + while (vf < num_vfs) { + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED + | BE_IF_FLAGS_BROADCAST; + status = be_cmd_if_create(adapter, cap_flags, en_flags, + mac, true, &adapter->vf_if_handle[vf], + NULL, vf+1); + if (status) { + dev_err(&adapter->pdev->dev, + "Interface Create failed for VF %d\n", vf); + goto if_destroy; + } + vf++; + } while (vf < num_vfs); + } else if (!be_physfn(adapter)) { + status = be_cmd_mac_addr_query(adapter, mac, + MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); + if (!status) { + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + } + } + status = be_tx_queues_create(adapter); if (status != 0) goto if_destroy; @@ -1782,6 +1901,9 @@ rx_qs_destroy: tx_qs_destroy: be_tx_queues_destroy(adapter); if_destroy: + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_if_handle[vf]) + be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]); be_cmd_if_destroy(adapter, adapter->if_handle); do_none: return status; @@ -1855,7 +1977,7 @@ static bool 
be_flash_redboot(struct be_adapter *adapter, p += crc_offset; status = be_cmd_get_flash_crc(adapter, flashed_crc, - (img_start + image_size - 4)); + (image_size - 4)); if (status) { dev_err(&adapter->pdev->dev, "could not get crc from flash, not flashing redboot\n"); @@ -1991,7 +2113,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) struct flash_file_hdr_g3 *fhdr3; struct image_hdr *img_hdr_ptr = NULL; struct be_dma_mem flash_cmd; - int status, i = 0; + int status, i = 0, num_imgs = 0; const u8 *p; strcpy(fw_file, func); @@ -2017,15 +2139,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) if ((adapter->generation == BE_GEN3) && (get_ufigen_type(fhdr) == BE_GEN3)) { fhdr3 = (struct flash_file_hdr_g3 *) fw->data; - for (i = 0; i < fhdr3->num_imgs; i++) { + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { img_hdr_ptr = (struct image_hdr *) (fw->data + (sizeof(struct flash_file_hdr_g3) + - i * sizeof(struct image_hdr))); - if (img_hdr_ptr->imageid == 1) { - status = be_flash_data(adapter, fw, - &flash_cmd, fhdr3->num_imgs); - } - + i * sizeof(struct image_hdr))); + if (le32_to_cpu(img_hdr_ptr->imageid) == 1) + status = be_flash_data(adapter, fw, &flash_cmd, + num_imgs); } } else if ((adapter->generation == BE_GEN2) && (get_ufigen_type(fhdr) == BE_GEN2)) { @@ -2062,6 +2183,7 @@ static struct net_device_ops be_netdev_ops = { .ndo_vlan_rx_register = be_vlan_register, .ndo_vlan_rx_add_vid = be_vlan_add_vid, .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, + .ndo_set_vf_mac = be_set_vf_mac }; static void be_netdev_init(struct net_device *netdev) @@ -2103,37 +2225,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) iounmap(adapter->csr); if (adapter->db) iounmap(adapter->db); - if (adapter->pcicfg) + if (adapter->pcicfg && be_physfn(adapter)) iounmap(adapter->pcicfg); } static int be_map_pci_bars(struct be_adapter *adapter) { u8 __iomem *addr; - int pcicfg_reg; + int pcicfg_reg, db_reg; - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), - pci_resource_len(adapter->pdev, 2)); - if (addr == NULL) - return -ENOMEM; - adapter->csr = addr; - - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), - 128 * 1024); - if (addr == NULL) - goto pci_map_err; - adapter->db = addr; + if (be_physfn(adapter)) { + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), + pci_resource_len(adapter->pdev, 2)); + if (addr == NULL) + return -ENOMEM; + adapter->csr = addr; + } - if (adapter->generation == BE_GEN2) + if (adapter->generation == BE_GEN2) { pcicfg_reg = 1; - else + db_reg = 4; + } else { pcicfg_reg = 0; - - addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), - pci_resource_len(adapter->pdev, pcicfg_reg)); + if (be_physfn(adapter)) + db_reg = 4; + else + db_reg = 0; + } + addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg), + pci_resource_len(adapter->pdev, db_reg)); if (addr == NULL) goto pci_map_err; - adapter->pcicfg = addr; + adapter->db = addr; + + if (be_physfn(adapter)) { + addr = ioremap_nocache( + pci_resource_start(adapter->pdev, pcicfg_reg), + pci_resource_len(adapter->pdev, pcicfg_reg)); + if (addr == NULL) + goto pci_map_err; + adapter->pcicfg = addr; + } else + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; return 0; pci_map_err: @@ -2247,6 +2380,8 @@ static void __devexit be_remove(struct pci_dev *pdev) be_ctrl_cleanup(adapter); + be_sriov_disable(adapter); + be_msix_disable(adapter); pci_set_drvdata(pdev, NULL); @@ -2271,16 +2406,20 @@ static int be_get_config(struct be_adapter 
*adapter) return status; memset(mac, 0, ETH_ALEN); - status = be_cmd_mac_addr_query(adapter, mac, + + if (be_physfn(adapter)) { + status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0); - if (status) - return status; - if (!is_valid_ether_addr(mac)) - return -EADDRNOTAVAIL; + if (status) + return status; + + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); - memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + } if (adapter->cap & 0x400) adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; @@ -2297,6 +2436,7 @@ static int __devinit be_probe(struct pci_dev *pdev, struct be_adapter *adapter; struct net_device *netdev; + status = pci_enable_device(pdev); if (status) goto do_none; @@ -2345,24 +2485,28 @@ static int __devinit be_probe(struct pci_dev *pdev, } } + be_sriov_enable(adapter); + status = be_ctrl_init(adapter); if (status) goto free_netdev; /* sync up with fw's ready state */ - status = be_cmd_POST(adapter); - if (status) - goto ctrl_clean; + if (be_physfn(adapter)) { + status = be_cmd_POST(adapter); + if (status) + goto ctrl_clean; + + status = be_cmd_reset_function(adapter); + if (status) + goto ctrl_clean; + } /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) goto ctrl_clean; - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - status = be_stats_init(adapter); if (status) goto ctrl_clean; @@ -2392,6 +2536,7 @@ ctrl_clean: be_ctrl_cleanup(adapter); free_netdev: be_msix_disable(adapter); + be_sriov_disable(adapter); free_netdev(adapter->netdev); pci_set_drvdata(pdev, NULL); rel_reg: @@ -2588,6 +2733,13 @@ static int __init be_init_module(void) rx_frag_size = 2048; } + if (num_vfs > 32) { + printk(KERN_WARNING DRV_NAME + " : Module param num_vfs must not be greater than 32." 
+ "Using 32\n"); + num_vfs = 32; + } + return pci_register_driver(&be_driver); } module_init(be_init_module); diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 587f93cf03f..c488cea8f45 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -812,14 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev) static void bfin_mac_multicast_hash(struct net_device *dev) { u32 emac_hashhi, emac_hashlo; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; emac_hashhi = emac_hashlo = 0; - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* skip non-multicast addresses */ if (!(*addrs & 1)) diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 119468e7632..44ceecf9d10 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -20,6 +20,7 @@ #include <linux/crc32.h> #include <linux/bitrev.h> #include <linux/ethtool.h> +#include <linux/slab.h> #include <asm/prom.h> #include <asm/dbdma.h> #include <asm/io.h> @@ -971,7 +972,7 @@ bmac_remove_multi(struct net_device *dev, */ static void bmac_set_multicast(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct bmac_data *bp = netdev_priv(dev); int num_addrs = netdev_mc_count(dev); unsigned short rx_cfg; @@ -1000,8 +1001,8 @@ static void bmac_set_multicast(struct net_device *dev) rx_cfg = bmac_rx_on(dev, 0, 0); XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); } else { - netdev_for_each_mc_addr(dmi, dev) - bmac_addhash(bp, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + bmac_addhash(bp, ha->addr); bmac_update_hash_table_mask(dev, bp); rx_cfg = bmac_rx_on(dev, 1, 0); XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); @@ -1015,7 +1016,7 @@ static void bmac_set_multicast(struct net_device *dev) static void bmac_set_multicast(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i; unsigned short rx_cfg; @@ -1039,8 +1040,8 @@ static void bmac_set_multicast(struct net_device *dev) for(i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if(!(*addrs & 1)) continue; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 381887ba677..53326fed6c8 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = { MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); +static void bnx2_init_napi(struct bnx2 *bp); + static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) { u32 diff; @@ -2668,7 +2670,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) } rx_pg->page = page; - pci_unmap_addr_set(rx_pg, mapping, mapping); + dma_unmap_addr_set(rx_pg, mapping, mapping); rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; return 0; @@ -2683,7 +2685,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) if (!page) return; - pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, + pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE, PCI_DMA_FROMDEVICE); __free_page(page); @@ -2715,7 +2717,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) } rx_buf->skb = skb; - pci_unmap_addr_set(rx_buf, mapping, mapping); + dma_unmap_addr_set(rx_buf, mapping, mapping); rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; rxbd->rx_bd_haddr_lo = 
(u64) mapping & 0xffffffff; @@ -2814,7 +2816,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } } - pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), + pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); tx_buf->skb = NULL; @@ -2824,7 +2826,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) sw_cons = NEXT_TX_BD(sw_cons); pci_unmap_page(bp->pdev, - pci_unmap_addr( + dma_unmap_addr( &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], mapping), skb_shinfo(skb)->frags[i].size, @@ -2906,8 +2908,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, if (prod != cons) { prod_rx_pg->page = cons_rx_pg->page; cons_rx_pg->page = NULL; - pci_unmap_addr_set(prod_rx_pg, mapping, - pci_unmap_addr(cons_rx_pg, mapping)); + dma_unmap_addr_set(prod_rx_pg, mapping, + dma_unmap_addr(cons_rx_pg, mapping)); prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; @@ -2931,7 +2933,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, prod_rx_buf = &rxr->rx_buf_ring[prod]; pci_dma_sync_single_for_device(bp->pdev, - pci_unmap_addr(cons_rx_buf, mapping), + dma_unmap_addr(cons_rx_buf, mapping), BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); rxr->rx_prod_bseq += bp->rx_buf_use_size; @@ -2941,8 +2943,8 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, if (cons == prod) return; - pci_unmap_addr_set(prod_rx_buf, mapping, - pci_unmap_addr(cons_rx_buf, mapping)); + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; @@ -3015,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, /* Don't unmap yet. If we're unable to allocate a new * page, we need to recycle the page and the DMA addr. */ - mapping_old = pci_unmap_addr(rx_pg, mapping); + mapping_old = dma_unmap_addr(rx_pg, mapping); if (i == pages - 1) frag_len -= 4; @@ -3096,7 +3098,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) rx_buf->skb = NULL; - dma_addr = pci_unmap_addr(rx_buf, mapping); + dma_addr = dma_unmap_addr(rx_buf, mapping); pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, @@ -3544,7 +3546,6 @@ bnx2_set_rx_mode(struct net_device *dev) } else { /* Accept one or more multicast(s). 
*/ - struct dev_mc_list *mclist; u32 mc_filter[NUM_MC_HASH_REGISTERS]; u32 regidx; u32 bit; @@ -3552,8 +3553,8 @@ bnx2_set_rx_mode(struct net_device *dev) memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); - netdev_for_each_mc_addr(mclist, dev) { - crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & 0xff; regidx = (bit & 0xe0) >> 5; bit &= 0x1f; @@ -5310,7 +5311,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) } pci_unmap_single(bp->pdev, - pci_unmap_addr(tx_buf, mapping), + dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); @@ -5321,7 +5322,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) for (k = 0; k < last; k++, j++) { tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; pci_unmap_page(bp->pdev, - pci_unmap_addr(tx_buf, mapping), + dma_unmap_addr(tx_buf, mapping), skb_shinfo(skb)->frags[k].size, PCI_DMA_TODEVICE); } @@ -5351,7 +5352,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) continue; pci_unmap_single(bp->pdev, - pci_unmap_addr(rx_buf, mapping), + dma_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -5761,7 +5762,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) skb_reserve(rx_skb, BNX2_RX_OFFSET); pci_dma_sync_single_for_cpu(bp->pdev, - pci_unmap_addr(rx_buf, mapping), + dma_unmap_addr(rx_buf, mapping), bp->rx_buf_size, PCI_DMA_FROMDEVICE); if (rx_hdr->l2_fhdr_status & @@ -6197,6 +6198,7 @@ bnx2_open(struct net_device *dev) bnx2_disable_int(bp); bnx2_setup_int_mode(bp, disable_msi); + bnx2_init_napi(bp); bnx2_napi_enable(bp); rc = bnx2_alloc_mem(bp); if (rc) @@ -6420,7 +6422,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = skb; - pci_unmap_addr_set(tx_buf, mapping, mapping); + dma_unmap_addr_set(tx_buf, mapping, mapping); txbd = &txr->tx_desc_ring[ring_prod]; @@ -6445,7 +6447,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(bp->pdev, mapping)) goto dma_error; - pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, + dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, mapping); txbd->tx_bd_haddr_hi = (u64) mapping >> 32; @@ -6482,7 +6484,7 @@ dma_error: ring_prod = TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; - pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), + pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); /* unmap remaining mapped pages */ @@ -6490,7 +6492,7 @@ dma_error: prod = NEXT_TX_BD(prod); ring_prod = TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; - pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping), + pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping), skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); } @@ -7643,9 +7645,11 @@ poll_bnx2(struct net_device *dev) int i; for (i = 0; i < bp->irq_nvecs; i++) { - disable_irq(bp->irq_tbl[i].vector); - bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]); - enable_irq(bp->irq_tbl[i].vector); + struct bnx2_irq *irq = &bp->irq_tbl[i]; + + disable_irq(irq->vector); + irq->handler(irq->vector, &bp->bnx2_napi[i]); + enable_irq(irq->vector); } } #endif @@ -8207,7 +8211,7 @@ bnx2_init_napi(struct bnx2 *bp) { int i; - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + for (i = 0; i < bp->irq_nvecs; i++) { struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; int (*poll)(struct napi_struct *, int); @@ -8276,7 +8280,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 
dev->ethtool_ops = &bnx2_ethtool_ops; bp = netdev_priv(dev); - bnx2_init_napi(bp); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index cd4b0e4637a..ab34a5d86f8 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6551,17 +6551,17 @@ struct l2_fhdr { struct sw_bd { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; struct sw_pg { struct page *page; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; struct sw_tx_bd { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); unsigned short is_gso; unsigned short nr_frags; }; diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 3c48a7a6830..ae9c89ebcc8 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h @@ -163,7 +163,7 @@ do { \ struct sw_rx_bd { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; struct sw_tx_bd { @@ -176,7 +176,7 @@ struct sw_tx_bd { struct sw_rx_page { struct page *page; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; union db_prod { diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c index 32e79c359e8..ff70be89876 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x_link.c @@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params, MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; - DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", + DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); bnx2x_pause_resolve(vars, pause_result); if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE && @@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params, MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; bnx2x_pause_resolve(vars, pause_result); - DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n", + DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", pause_result); } } @@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params, } } - DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n", + DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", gp_status, vars->phy_link_up, vars->line_speed); DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" " autoneg 0x%x\n", @@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) SPEED_AUTO_NEG) && ((params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { - DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); @@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) ext_phy_addr, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); - DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1); + DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); } else if ((params->req_line_speed == SPEED_AUTO_NEG) && ((params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { - DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr, MDIO_AN_DEVAD, MDIO_PMA_REG_8727_MISC_CTRL, 0); diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index ed785a30e98..63a17d604a9 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -57,8 +57,8 @@ #include "bnx2x_init_ops.h" #include "bnx2x_dump.h" -#define 
DRV_MODULE_VERSION "1.52.1-7" -#define DRV_MODULE_RELDATE "2010/02/28" +#define DRV_MODULE_VERSION "1.52.1-8" +#define DRV_MODULE_RELDATE "2010/04/01" #define BNX2X_BC_VER 0x040200 #include <linux/firmware.h> @@ -842,7 +842,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* unmap first bd */ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; - pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd), + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); nbd = le16_to_cpu(tx_start_bd->nbd) - 1; @@ -872,8 +872,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; - pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd), - BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE); + dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); if (--nbd) bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } @@ -893,7 +893,6 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) u16 prod; u16 cons; - barrier(); /* Tell compiler that prod and cons can change */ prod = fp->tx_bd_prod; cons = fp->tx_bd_cons; @@ -963,7 +962,7 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp) * start_xmit() will miss it and cause the queue to be stopped * forever. */ - smp_wmb(); + smp_mb(); /* TBD need a thresh? */ if (unlikely(netif_tx_queue_stopped(txq))) { @@ -1087,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, if (!page) return; - pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping), + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); __free_pages(page, PAGES_PER_SGE_SHIFT); @@ -1116,15 +1115,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, if (unlikely(page == NULL)) return -ENOMEM; - mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE, - PCI_DMA_FROMDEVICE); + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { __free_pages(page, PAGES_PER_SGE_SHIFT); return -ENOMEM; } sw_buf->page = page; - pci_unmap_addr_set(sw_buf, mapping, mapping); + dma_unmap_addr_set(sw_buf, mapping, mapping); sge->addr_hi = cpu_to_le32(U64_HI(mapping)); sge->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -1144,15 +1143,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, if (unlikely(skb == NULL)) return -ENOMEM; - mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size, - PCI_DMA_FROMDEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { dev_kfree_skb(skb); return -ENOMEM; } rx_buf->skb = skb; - pci_unmap_addr_set(rx_buf, mapping, mapping); + dma_unmap_addr_set(rx_buf, mapping, mapping); rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -1174,13 +1173,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; - pci_dma_sync_single_for_device(bp->pdev, - pci_unmap_addr(cons_rx_buf, mapping), - RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(cons_rx_buf, mapping), + RX_COPY_THRESH, DMA_FROM_DEVICE); prod_rx_buf->skb = 
cons_rx_buf->skb; - pci_unmap_addr_set(prod_rx_buf, mapping, - pci_unmap_addr(cons_rx_buf, mapping)); + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); *prod_bd = *cons_bd; } @@ -1284,9 +1283,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, /* move empty skb from pool to prod and map it */ prod_rx_buf->skb = fp->tpa_pool[queue].skb; - mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data, - bp->rx_buf_size, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(prod_rx_buf, mapping, mapping); + mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, + bp->rx_buf_size, DMA_FROM_DEVICE); + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); /* move partial skb from cons to pool (don't unmap yet) */ fp->tpa_pool[queue] = *cons_rx_buf; @@ -1362,8 +1361,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, } /* Unmap the page as we r going to pass it to the stack */ - pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields in the skb */ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); @@ -1390,8 +1390,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */ - pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); if (likely(new_skb)) { /* fix ip xsum and give it to the stack */ @@ -1442,12 +1442,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, #ifdef BCM_VLAN if ((bp->vlgrp != NULL) && is_vlan_cqe && (!is_not_hwaccel_vlan_cqe)) - vlan_hwaccel_receive_skb(skb, bp->vlgrp, - le16_to_cpu(cqe->fast_path_cqe. - vlan_tag)); + vlan_gro_receive(&fp->napi, bp->vlgrp, + le16_to_cpu(cqe->fast_path_cqe. 
+ vlan_tag), skb); else #endif - netif_receive_skb(skb); + napi_gro_receive(&fp->napi, skb); } else { DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" " - dropping packet!\n"); @@ -1621,10 +1621,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) } } - pci_dma_sync_single_for_device(bp->pdev, - pci_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); prefetch(skb); prefetch(((char *)(skb)) + 128); @@ -1666,10 +1666,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) } else if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { - pci_unmap_single(bp->pdev, - pci_unmap_addr(rx_buf, mapping), + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), bp->rx_buf_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb_reserve(skb, pad); skb_put(skb, len); @@ -1700,11 +1700,11 @@ reuse_rx: if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & PARSING_FLAGS_VLAN)) - vlan_hwaccel_receive_skb(skb, bp->vlgrp, - le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); + vlan_gro_receive(&fp->napi, bp->vlgrp, + le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb); else #endif - netif_receive_skb(skb); + napi_gro_receive(&fp->napi, skb); next_rx: @@ -4941,9 +4941,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, } if (fp->tpa_state[i] == BNX2X_TPA_START) - pci_unmap_single(bp->pdev, - pci_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); dev_kfree_skb(skb); rx_buf->skb = NULL; @@ -4979,7 +4979,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) fp->disable_tpa = 1; break; } - pci_unmap_addr_set((struct sw_rx_bd *) + dma_unmap_addr_set((struct sw_rx_bd *) &bp->fp->tpa_pool[i], mapping, 0); fp->tpa_state[i] = BNX2X_TPA_STOP; @@ -5659,8 +5659,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) static int bnx2x_gunzip_init(struct bnx2x *bp) { - bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE, - &bp->gunzip_mapping); + bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, + &bp->gunzip_mapping, GFP_KERNEL); if (bp->gunzip_buf == NULL) goto gunzip_nomem1; @@ -5680,8 +5680,8 @@ gunzip_nomem3: bp->strm = NULL; gunzip_nomem2: - pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); bp->gunzip_buf = NULL; gunzip_nomem1: @@ -5697,8 +5697,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp) bp->strm = NULL; if (bp->gunzip_buf) { - pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, - bp->gunzip_mapping); + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); bp->gunzip_buf = NULL; } } @@ -6693,7 +6693,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) #define BNX2X_PCI_FREE(x, y, size) \ do { \ if (x) { \ - pci_free_consistent(bp->pdev, size, x, y); \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ x = NULL; \ y = 0; \ } \ @@ -6774,7 +6774,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) #define BNX2X_PCI_ALLOC(x, y, size) \ do { \ - x = pci_alloc_consistent(bp->pdev, size, y); \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ if (x == NULL) \ goto alloc_mem_err; \ memset(x, 0, size); \ @@ -6907,9 +6907,9 @@ static void 
bnx2x_free_rx_skbs(struct bnx2x *bp) if (skb == NULL) continue; - pci_unmap_single(bp->pdev, - pci_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_size, DMA_FROM_DEVICE); rx_buf->skb = NULL; dev_kfree_skb(skb); @@ -8936,6 +8936,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) bp->multi_mode = multi_mode; + bp->dev->features |= NETIF_F_GRO; + /* Set TPA flags */ if (disable_tpa) { bp->flags &= ~TPA_ENABLE_FLAG; @@ -10268,8 +10270,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) bd_prod = TX_BD(fp_tx->tx_bd_prod); tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; - mapping = pci_map_single(bp->pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ @@ -11315,8 +11317,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - mapping = pci_map_single(bp->pdev, skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -11373,8 +11375,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (total_pkt_bd == NULL) total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + mapping = dma_map_page(&bp->pdev->dev, frag->page, + frag->page_offset, + frag->size, DMA_TO_DEVICE); tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); @@ -11429,9 +11432,12 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { netif_tx_stop_queue(txq); - /* We want bnx2x_tx_int to "see" the updated tx_bd_prod - if we put Tx into XOFF state. */ + + /* paired memory barrier is in bnx2x_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons */ smp_mb(); + fp->eth_q_stats.driver_xoff++; if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) netif_tx_wake_queue(txq); @@ -11492,21 +11498,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev) else { /* some multicasts */ if (CHIP_IS_E1(bp)) { int i, old, offset; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct mac_configuration_cmd *config = bnx2x_sp(bp, mcast_config); i = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { config->config_table[i]. cam_entry.msb_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[0]); + swab16(*(u16 *)&ha->addr[0]); config->config_table[i]. cam_entry.middle_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[2]); + swab16(*(u16 *)&ha->addr[2]); config->config_table[i]. cam_entry.lsb_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[4]); + swab16(*(u16 *)&ha->addr[4]); config->config_table[i].cam_entry.flags = cpu_to_le16(port); config->config_table[i]. 
@@ -11560,18 +11566,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev) 0); } else { /* E1H */ /* Accept one or more multicasts */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[MC_HASH_SIZE]; u32 crc, bit, regidx; int i; memset(mc_filter, 0, 4 * MC_HASH_SIZE); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", - mclist->dmi_addr); + ha->addr); - crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); + crc = crc32c_le(0, ha->addr, ETH_ALEN); bit = (crc >> 24) & 0xff; regidx = bit >> 5; bit &= 0x1f; @@ -11828,15 +11834,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, goto err_out_release; } - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) { bp->flags |= USING_DAC_FLAG; - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - pr_err("pci_set_consistent_dma_mask failed, aborting\n"); + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) { + pr_err("dma_set_coherent_mask failed, aborting\n"); rc = -EIO; goto err_out_release; } - } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) { pr_err("System does not support DMA, aborting\n"); rc = -EIO; goto err_out_release; diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c index 6dd64cf3cb7..969ffed86b9 100644 --- a/drivers/net/bonding/bond_ipv6.c +++ b/drivers/net/bonding/bond_ipv6.c @@ -37,7 +37,6 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) { struct inet6_dev *idev; - struct inet6_ifaddr *ifa; if (!dev) return; @@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) return; read_lock_bh(&idev->lock); - ifa = idev->addr_list; - if (ifa) + if (!list_empty(&idev->addr_list)) { + struct inet6_ifaddr *ifa + = list_first_entry(&idev->addr_list, + struct inet6_ifaddr, if_list); ipv6_addr_copy(addr, &ifa->addr); - else + } else ipv6_addr_set(addr, 0, 0, 0, 0); read_unlock_bh(&idev->lock); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 430c02267d7..85e813c7762 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -762,32 +762,6 @@ static int bond_check_dev_link(struct bonding *bond, /*----------------------------- Multicast list ------------------------------*/ /* - * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise - */ -static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1, - const struct dev_mc_list *dmi2) -{ - return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 && - dmi1->dmi_addrlen == dmi2->dmi_addrlen; -} - -/* - * returns dmi entry if found, NULL otherwise - */ -static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi, - struct dev_mc_list *mc_list) -{ - struct dev_mc_list *idmi; - - for (idmi = mc_list; idmi; idmi = idmi->next) { - if (bond_is_dmi_same(dmi, idmi)) - return idmi; - } - - return NULL; -} - -/* * Push the promiscuity flag down to appropriate slaves */ static int bond_set_promiscuity(struct bonding *bond, int inc) @@ -839,18 +813,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc) * Add a Multicast address to slaves * according to mode */ -static void bond_mc_add(struct bonding *bond, void *addr, int alen) +static void bond_mc_add(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already 
acquired */ if (bond->curr_active_slave) - dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0); + dev_mc_add(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) - dev_mc_add(slave->dev, addr, alen, 0); + dev_mc_add(slave->dev, addr); } } @@ -858,18 +832,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen) * Remove a multicast address from slave * according to mode */ -static void bond_mc_delete(struct bonding *bond, void *addr, int alen) +static void bond_mc_del(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) - dev_mc_delete(bond->curr_active_slave->dev, addr, - alen, 0); + dev_mc_del(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { - dev_mc_delete(slave->dev, addr, alen, 0); + dev_mc_del(slave->dev, addr); } } } @@ -896,66 +869,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) } /* - * Totally destroys the mc_list in bond - */ -static void bond_mc_list_destroy(struct bonding *bond) -{ - struct dev_mc_list *dmi; - - dmi = bond->mc_list; - while (dmi) { - bond->mc_list = dmi->next; - kfree(dmi); - dmi = bond->mc_list; - } - - bond->mc_list = NULL; -} - -/* - * Copy all the Multicast addresses from src to the bonding device dst - */ -static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, - gfp_t gfp_flag) -{ - struct dev_mc_list *dmi, *new_dmi; - - for (dmi = mc_list; dmi; dmi = dmi->next) { - new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag); - - if (!new_dmi) { - /* FIXME: Potential memory leak !!! */ - return -ENOMEM; - } - - new_dmi->next = bond->mc_list; - bond->mc_list = new_dmi; - new_dmi->dmi_addrlen = dmi->dmi_addrlen; - memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen); - new_dmi->dmi_users = dmi->dmi_users; - new_dmi->dmi_gusers = dmi->dmi_gusers; - } - - return 0; -} - -/* * flush all members of flush->mc_list from device dev->mc_list */ static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) - dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond_dev) + dev_mc_del(slave_dev, ha->addr); if (bond->params.mode == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0); + dev_mc_del(slave_dev, lacpdu_multicast); } } @@ -969,7 +898,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev, static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (!USES_PRIMARY(bond->params.mode)) /* nothing to do - mc list is already up-to-date on @@ -984,9 +913,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); - for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) - dev_mc_delete(old_active->dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond->dev) + dev_mc_del(old_active->dev, ha->addr); } if (new_active) { @@ -997,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & 
IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); - for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) - dev_mc_add(new_active->dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond->dev) + dev_mc_add(new_active->dev, ha->addr); bond_resend_igmp_join_requests(bond); } } @@ -1235,6 +1162,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) write_lock_bh(&bond->curr_slave_lock); } } + + /* resend IGMP joins since all were sent on curr_active_slave */ + if (bond->params.mode == BOND_MODE_ROUNDROBIN) { + bond_resend_igmp_join_requests(bond); + } } /** @@ -1406,7 +1338,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct sockaddr addr; int link_reporting; int old_features = bond_dev->features; @@ -1480,14 +1412,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, bond_dev->type, slave_dev->type); - netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE); + res = netdev_bonding_change(bond_dev, + NETDEV_PRE_TYPE_CHANGE); + res = notifier_to_errno(res); + if (res) { + pr_err("%s: refused to change device type\n", + bond_dev->name); + res = -EBUSY; + goto err_undo_flags; + } + + /* Flush unicast and multicast addresses */ + dev_uc_flush(bond_dev); + dev_mc_flush(bond_dev); if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); else ether_setup(bond_dev); - netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE); + netdev_bonding_change(bond_dev, + NETDEV_POST_TYPE_CHANGE); } } else if (bond_dev->type != slave_dev->type) { pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", @@ -1588,9 +1533,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) netif_addr_lock_bh(bond_dev); /* upload master's mc_list to new slave */ - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) - dev_mc_add(slave_dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond_dev) + dev_mc_add(slave_dev, ha->addr); netif_addr_unlock_bh(bond_dev); } @@ -1598,7 +1542,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* add lacpdu mc addr to mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0); + dev_mc_add(slave_dev, lacpdu_multicast); } bond_add_vlans_on_slave(bond, slave_dev); @@ -3900,10 +3844,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return res; } +static bool bond_addr_in_mc_list(unsigned char *addr, + struct netdev_hw_addr_list *list, + int addrlen) +{ + struct netdev_hw_addr *ha; + + netdev_hw_addr_list_for_each(ha, list) + if (!memcmp(ha->addr, addr, addrlen)) + return true; + + return false; +} + static void bond_set_multicast_list(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; + bool found; /* * Do promisc before checking multicast_mode @@ -3938,20 +3896,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev) bond->flags = bond_dev->flags; /* looking for addresses to add to slaves' mc list */ - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { - if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) - bond_mc_add(bond, dmi->dmi_addr, 
dmi->dmi_addrlen); + netdev_for_each_mc_addr(ha, bond_dev) { + found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, + bond_dev->addr_len); + if (!found) + bond_mc_add(bond, ha->addr); } /* looking for addresses to delete from slaves' list */ - for (dmi = bond->mc_list; dmi; dmi = dmi->next) { - if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) - bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen); + netdev_hw_addr_list_for_each(ha, &bond->mc_list) { + found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, + bond_dev->addr_len); + if (!found) + bond_mc_del(bond, ha->addr); } /* save master's multicast list */ - bond_mc_list_destroy(bond); - bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); + __hw_addr_flush(&bond->mc_list); + __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc, + bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST); read_unlock(&bond->lock); } @@ -4138,22 +4101,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *start_at; int i, slave_no, res = 1; + struct iphdr *iph = ip_hdr(skb); read_lock(&bond->lock); if (!BOND_IS_OK(bond)) goto out; - /* - * Concurrent TX may collide on rr_tx_counter; we accept that - * as being rare enough not to justify using an atomic op here + * Start with the curr_active_slave that joined the bond as the + * default for sending IGMP traffic. For failover purposes one + * needs to maintain some consistency for the interface that will + * send the join/membership reports. The curr_active_slave found + * will send all of this type of traffic. */ - slave_no = bond->rr_tx_counter++ % bond->slave_cnt; + if ((iph->protocol == IPPROTO_IGMP) && + (skb->protocol == htons(ETH_P_IP))) { - bond_for_each_slave(bond, slave, i) { - slave_no--; - if (slave_no < 0) - break; + read_lock(&bond->curr_slave_lock); + slave = bond->curr_active_slave; + read_unlock(&bond->curr_slave_lock); + + if (!slave) + goto out; + } else { + /* + * Concurrent TX may collide on rr_tx_counter; we accept + * that as being rare enough not to justify using an + * atomic op here. 
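Aside on the hunk above (a condensed sketch, not code taken from the patch): bond_xmit_roundrobin() now pins IGMP traffic to curr_active_slave so membership reports keep flowing through one consistent interface across failover, while all other traffic still round-robins on rr_tx_counter. Roughly, the slave selection becomes the following, where nth_slave() is a made-up stand-in for the bond_for_each_slave() walk and the curr_slave_lock handling is omitted:

    /* Illustrative only: nth_slave() is hypothetical; the real code walks
     * the slave list with bond_for_each_slave() and takes curr_slave_lock
     * around the curr_active_slave read. */
    static struct slave *rr_pick_slave(struct bonding *bond, struct sk_buff *skb)
    {
        struct iphdr *iph = ip_hdr(skb);

        /* Keep IGMP joins/reports on one consistent slave. */
        if (skb->protocol == htons(ETH_P_IP) &&
            iph->protocol == IPPROTO_IGMP)
            return bond->curr_active_slave;

        /* Everything else: plain round-robin; collisions on the counter
         * are rare enough to tolerate without an atomic op. */
        return nth_slave(bond, bond->rr_tx_counter++ % bond->slave_cnt);
    }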
+ */ + slave_no = bond->rr_tx_counter++ % bond->slave_cnt; + + bond_for_each_slave(bond, slave, i) { + slave_no--; + if (slave_no < 0) + break; + } } start_at = slave; @@ -4426,6 +4408,14 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, }; +static void bond_destructor(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + if (bond->wq) + destroy_workqueue(bond->wq); + free_netdev(bond_dev); +} + static void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -4446,7 +4436,7 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->ethtool_ops = &bond_ethtool_ops; bond_set_mode_ops(bond, bond->params.mode); - bond_dev->destructor = free_netdev; + bond_dev->destructor = bond_destructor; /* Initialize the device options */ bond_dev->tx_queue_len = 0; @@ -4518,12 +4508,7 @@ static void bond_uninit(struct net_device *bond_dev) bond_remove_proc_entry(bond); - if (bond->wq) - destroy_workqueue(bond->wq); - - netif_addr_lock_bh(bond_dev); - bond_mc_list_destroy(bond); - netif_addr_unlock_bh(bond_dev); + __hw_addr_flush(&bond->mc_list); } /*------------------------- Module initialization ---------------------------*/ @@ -4654,13 +4639,13 @@ static int bond_check_params(struct bond_params *params) } if (num_grat_arp < 0 || num_grat_arp > 255) { - pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n", + pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n", num_grat_arp); num_grat_arp = 1; } if (num_unsol_na < 0 || num_unsol_na > 255) { - pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n", + pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n", num_unsol_na); num_unsol_na = 1; } @@ -4895,6 +4880,8 @@ static int bond_init(struct net_device *bond_dev) list_add_tail(&bond->bond_list, &bn->dev_list); bond_prepare_sysfs_group(bond); + + __hw_addr_init(&bond->mc_list); return 0; } @@ -4932,8 +4919,8 @@ int bond_create(struct net *net, const char *name) bond_setup); if (!bond_dev) { pr_err("%s: eek! can't alloc netdev!\n", name); - res = -ENOMEM; - goto out; + rtnl_unlock(); + return -ENOMEM; } dev_net_set(bond_dev, net); @@ -4942,19 +4929,16 @@ int bond_create(struct net *net, const char *name) if (!name) { res = dev_alloc_name(bond_dev, "bond%d"); if (res < 0) - goto out_netdev; + goto out; } res = register_netdevice(bond_dev); - if (res < 0) - goto out_netdev; out: rtnl_unlock(); + if (res < 0) + bond_destructor(bond_dev); return res; -out_netdev: - free_netdev(bond_dev); - goto out; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 5acd557cea9..b8bec086daa 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -51,7 +51,9 @@ * "show" function for the bond_masters attribute. * The class parameter is ignored. 
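Aside on the sysfs hunks that follow (sketch only, names are placeholders): the bonding class attributes pick up the tree-wide change that passes the struct class_attribute pointer into show/store callbacks. The new callback shape looks like this; the bonding functions below only gain the extra attr argument, their bodies are unchanged:

    /* Hypothetical class attribute with the attr-aware callbacks. */
    static ssize_t example_show(struct class *cls, struct class_attribute *attr,
                                char *buf)
    {
        return sprintf(buf, "example\n");
    }

    static ssize_t example_store(struct class *cls, struct class_attribute *attr,
                                 const char *buf, size_t count)
    {
        return count;           /* accept and ignore the input */
    }

    static CLASS_ATTR(example, 0644, example_show, example_store);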
*/ -static ssize_t bonding_show_bonds(struct class *cls, char *buf) +static ssize_t bonding_show_bonds(struct class *cls, + struct class_attribute *attr, + char *buf) { struct net *net = current->nsproxy->net_ns; struct bond_net *bn = net_generic(net, bond_net_id); @@ -98,6 +100,7 @@ static struct net_device *bond_get_by_name(struct net *net, const char *ifname) */ static ssize_t bonding_store_bonds(struct class *cls, + struct class_attribute *attr, const char *buffer, size_t count) { struct net *net = current->nsproxy->net_ns; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 257a7a4dfce..2aa33672059 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -202,7 +202,7 @@ struct bonding { char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - struct dev_mc_list *mc_list; + struct netdev_hw_addr_list mc_list; int (*xmit_hash_policy)(struct sk_buff *, int); __be32 master_ip; u16 flags; diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig new file mode 100644 index 00000000000..0b28e010769 --- /dev/null +++ b/drivers/net/caif/Kconfig @@ -0,0 +1,17 @@ +# +# CAIF physical drivers +# + +if CAIF + +comment "CAIF transport drivers" + +config CAIF_TTY + tristate "CAIF TTY transport driver" + default n + ---help--- + The CAIF TTY transport driver is a Line Discipline (ldisc) + identified as N_CAIF. When this ldisc is opened from user space + it will redirect the TTY's traffic into the CAIF stack. + +endif # CAIF diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile new file mode 100644 index 00000000000..52b6d1f826f --- /dev/null +++ b/drivers/net/caif/Makefile @@ -0,0 +1,12 @@ +ifeq ($(CONFIG_CAIF_DEBUG),1) +CAIF_DBG_FLAGS := -DDEBUG +endif + +KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers + +ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS) +clean-dirs:= .tmp_versions +clean-files:= Module.symvers modules.order *.cmd *~ \ + +# Serial interface +obj-$(CONFIG_CAIF_TTY) += caif_serial.o diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c new file mode 100644 index 00000000000..38c0186cfbc --- /dev/null +++ b/drivers/net/caif/caif_serial.c @@ -0,0 +1,446 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Author: Sjur Brendeland / sjur.brandeland@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/init.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/tty.h> +#include <linux/file.h> +#include <linux/if_arp.h> +#include <net/caif/caif_device.h> +#include <net/caif/cfcnfg.h> +#include <linux/err.h> +#include <linux/debugfs.h> + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>"); +MODULE_DESCRIPTION("CAIF serial device TTY line discipline"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_LDISC(N_CAIF); + +#define SEND_QUEUE_LOW 10 +#define SEND_QUEUE_HIGH 100 +#define CAIF_SENDING 1 /* Bit 1 = 0x02*/ +#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */ +#define MAX_WRITE_CHUNK 4096 +#define ON 1 +#define OFF 0 +#define CAIF_MAX_MTU 4096 + +/*This list is protected by the rtnl lock. 
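Aside (sketch only): SEND_QUEUE_HIGH and SEND_QUEUE_LOW above are the watermarks of a simple xon/xoff scheme implemented further down in caif_xmit() and handle_tx(): the CAIF stack is throttled once the TTY queue grows past the high mark and resumed when it drains below the low mark. Condensed, using the ser_device fields declared below:

    /* Condensed view of the flow-control pattern; the real checks sit in
     * caif_xmit() (high mark) and handle_tx() (low mark). */
    static void example_watermarks(struct ser_device *ser)
    {
        if (ser->head.qlen > SEND_QUEUE_HIGH &&
            !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
            ser->common.flowctrl(ser->dev, OFF);   /* throttle the stack */

        if (ser->head.qlen <= SEND_QUEUE_LOW &&
            test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
            ser->common.flowctrl(ser->dev, ON);    /* resume the stack */
    }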
*/ +static LIST_HEAD(ser_list); + +static int ser_loop; +module_param(ser_loop, bool, S_IRUGO); +MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode."); + +static int ser_use_stx = 1; +module_param(ser_use_stx, bool, S_IRUGO); +MODULE_PARM_DESC(ser_use_stx, "STX enabled or not."); + +static int ser_use_fcs = 1; + +module_param(ser_use_fcs, bool, S_IRUGO); +MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not."); + +static int ser_write_chunk = MAX_WRITE_CHUNK; +module_param(ser_write_chunk, int, S_IRUGO); + +MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART."); + +static struct dentry *debugfsdir; + +static int caif_net_open(struct net_device *dev); +static int caif_net_close(struct net_device *dev); + +struct ser_device { + struct caif_dev_common common; + struct list_head node; + struct net_device *dev; + struct sk_buff_head head; + struct tty_struct *tty; + bool tx_started; + unsigned long state; + char *tty_name; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_tty_dir; + struct debugfs_blob_wrapper tx_blob; + struct debugfs_blob_wrapper rx_blob; + u8 rx_data[128]; + u8 tx_data[128]; + u8 tty_status; + +#endif +}; + +static void caifdev_setup(struct net_device *dev); +static void ldisc_tx_wakeup(struct tty_struct *tty); +#ifdef CONFIG_DEBUG_FS +static inline void update_tty_status(struct ser_device *ser) +{ + ser->tty_status = + ser->tty->stopped << 5 | + ser->tty->hw_stopped << 4 | + ser->tty->flow_stopped << 3 | + ser->tty->packet << 2 | + ser->tty->low_latency << 1 | + ser->tty->warned; +} +static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty) +{ + ser->debugfs_tty_dir = + debugfs_create_dir(tty->name, debugfsdir); + if (!IS_ERR(ser->debugfs_tty_dir)) { + debugfs_create_blob("last_tx_msg", S_IRUSR, + ser->debugfs_tty_dir, + &ser->tx_blob); + + debugfs_create_blob("last_rx_msg", S_IRUSR, + ser->debugfs_tty_dir, + &ser->rx_blob); + + debugfs_create_x32("ser_state", S_IRUSR, + ser->debugfs_tty_dir, + (u32 *)&ser->state); + + debugfs_create_x8("tty_status", S_IRUSR, + ser->debugfs_tty_dir, + &ser->tty_status); + + } + ser->tx_blob.data = ser->tx_data; + ser->tx_blob.size = 0; + ser->rx_blob.data = ser->rx_data; + ser->rx_blob.size = 0; +} + +static inline void debugfs_deinit(struct ser_device *ser) +{ + debugfs_remove_recursive(ser->debugfs_tty_dir); +} + +static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size) +{ + if (size > sizeof(ser->rx_data)) + size = sizeof(ser->rx_data); + memcpy(ser->rx_data, data, size); + ser->rx_blob.data = ser->rx_data; + ser->rx_blob.size = size; +} + +static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size) +{ + if (size > sizeof(ser->tx_data)) + size = sizeof(ser->tx_data); + memcpy(ser->tx_data, data, size); + ser->tx_blob.data = ser->tx_data; + ser->tx_blob.size = size; +} +#else +static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty) +{ +} + +static inline void debugfs_deinit(struct ser_device *ser) +{ +} + +static inline void update_tty_status(struct ser_device *ser) +{ +} + +static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size) +{ +} + +static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size) +{ +} + +#endif + +static void ldisc_receive(struct tty_struct *tty, const u8 *data, + char *flags, int count) +{ + struct sk_buff *skb = NULL; + struct ser_device *ser; + int ret; + u8 *p; + ser = tty->disc_data; + + /* + * NOTE: flags may contain information about break or overrun. 
+ * This is not yet handled. + */ + + + /* + * Workaround for garbage at start of transmission, + * only enable if STX handling is not enabled. + */ + if (!ser->common.use_stx && !ser->tx_started) { + dev_info(&ser->dev->dev, + "Bytes received before initial transmission -" + "bytes discarded.\n"); + return; + } + + BUG_ON(ser->dev == NULL); + + /* Get a suitable caif packet and copy in data. */ + skb = netdev_alloc_skb(ser->dev, count+1); + BUG_ON(skb == NULL); + p = skb_put(skb, count); + memcpy(p, data, count); + + skb->protocol = htons(ETH_P_CAIF); + skb_reset_mac_header(skb); + skb->dev = ser->dev; + debugfs_rx(ser, data, count); + /* Push received packet up the stack. */ + ret = netif_rx_ni(skb); + if (!ret) { + ser->dev->stats.rx_packets++; + ser->dev->stats.rx_bytes += count; + } else + ++ser->dev->stats.rx_dropped; + update_tty_status(ser); +} + +static int handle_tx(struct ser_device *ser) +{ + struct tty_struct *tty; + struct sk_buff *skb; + int tty_wr, len, room; + tty = ser->tty; + ser->tx_started = true; + + /* Enter critical section */ + if (test_and_set_bit(CAIF_SENDING, &ser->state)) + return 0; + + /* skb_peek is safe because handle_tx is called after skb_queue_tail */ + while ((skb = skb_peek(&ser->head)) != NULL) { + + /* Make sure you don't write too much */ + len = skb->len; + room = tty_write_room(tty); + if (!room) + break; + if (room > ser_write_chunk) + room = ser_write_chunk; + if (len > room) + len = room; + + /* Write to tty or loopback */ + if (!ser_loop) { + tty_wr = tty->ops->write(tty, skb->data, len); + update_tty_status(ser); + } else { + tty_wr = len; + ldisc_receive(tty, skb->data, NULL, len); + } + ser->dev->stats.tx_packets++; + ser->dev->stats.tx_bytes += tty_wr; + + /* Error on TTY ?! */ + if (tty_wr < 0) + goto error; + /* Reduce buffer written, and discard if empty */ + skb_pull(skb, tty_wr); + if (skb->len == 0) { + struct sk_buff *tmp = skb_dequeue(&ser->head); + BUG_ON(tmp != skb); + if (in_interrupt()) + dev_kfree_skb_irq(skb); + else + kfree_skb(skb); + } + } + /* Send flow off if queue is empty */ + if (ser->head.qlen <= SEND_QUEUE_LOW && + test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) && + ser->common.flowctrl != NULL) + ser->common.flowctrl(ser->dev, ON); + clear_bit(CAIF_SENDING, &ser->state); + return 0; +error: + clear_bit(CAIF_SENDING, &ser->state); + return tty_wr; +} + +static int caif_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ser_device *ser; + BUG_ON(dev == NULL); + ser = netdev_priv(dev); + + /* Send flow off once, on high water mark */ + if (ser->head.qlen > SEND_QUEUE_HIGH && + !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) && + ser->common.flowctrl != NULL) + + ser->common.flowctrl(ser->dev, OFF); + + skb_queue_tail(&ser->head, skb); + return handle_tx(ser); +} + + +static void ldisc_tx_wakeup(struct tty_struct *tty) +{ + struct ser_device *ser; + ser = tty->disc_data; + BUG_ON(ser == NULL); + BUG_ON(ser->tty != tty); + handle_tx(ser); +} + + +static int ldisc_open(struct tty_struct *tty) +{ + struct ser_device *ser; + struct net_device *dev; + char name[64]; + int result; + + /* No write no play */ + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + + sprintf(name, "cf%s", tty->name); + dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); + ser = netdev_priv(dev); + ser->tty = tty_kref_get(tty); + ser->dev = dev; + debugfs_init(ser, tty); + tty->receive_room = N_TTY_BUF_SIZE; + tty->disc_data = ser; + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + rtnl_lock(); + result = 
register_netdevice(dev); + if (result) { + rtnl_unlock(); + free_netdev(dev); + return -ENODEV; + } + + list_add(&ser->node, &ser_list); + rtnl_unlock(); + netif_stop_queue(dev); + update_tty_status(ser); + return 0; +} + +static void ldisc_close(struct tty_struct *tty) +{ + struct ser_device *ser = tty->disc_data; + /* Remove may be called inside or outside of rtnl_lock */ + int islocked = rtnl_is_locked(); + if (!islocked) + rtnl_lock(); + /* device is freed automagically by net-sysfs */ + dev_close(ser->dev); + unregister_netdevice(ser->dev); + list_del(&ser->node); + debugfs_deinit(ser); + tty_kref_put(ser->tty); + if (!islocked) + rtnl_unlock(); +} + +/* The line discipline structure. */ +static struct tty_ldisc_ops caif_ldisc = { + .owner = THIS_MODULE, + .magic = TTY_LDISC_MAGIC, + .name = "n_caif", + .open = ldisc_open, + .close = ldisc_close, + .receive_buf = ldisc_receive, + .write_wakeup = ldisc_tx_wakeup +}; + +static int register_ldisc(void) +{ + int result; + result = tty_register_ldisc(N_CAIF, &caif_ldisc); + if (result < 0) { + pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, + result); + return result; + } + return result; +} +static const struct net_device_ops netdev_ops = { + .ndo_open = caif_net_open, + .ndo_stop = caif_net_close, + .ndo_start_xmit = caif_xmit +}; + +static void caifdev_setup(struct net_device *dev) +{ + struct ser_device *serdev = netdev_priv(dev); + dev->features = 0; + dev->netdev_ops = &netdev_ops; + dev->type = ARPHRD_CAIF; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = CAIF_MAX_MTU; + dev->hard_header_len = CAIF_NEEDED_HEADROOM; + dev->tx_queue_len = 0; + dev->destructor = free_netdev; + skb_queue_head_init(&serdev->head); + serdev->common.link_select = CAIF_LINK_LOW_LATENCY; + serdev->common.use_frag = true; + serdev->common.use_stx = ser_use_stx; + serdev->common.use_fcs = ser_use_fcs; + serdev->dev = dev; +} + + +static int caif_net_open(struct net_device *dev) +{ + struct ser_device *ser; + ser = netdev_priv(dev); + netif_wake_queue(dev); + return 0; +} + +static int caif_net_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static int __init caif_ser_init(void) +{ + int ret; + ret = register_ldisc(); + debugfsdir = debugfs_create_dir("caif_serial", NULL); + return ret; +} + +static void __exit caif_ser_exit(void) +{ + struct ser_device *ser = NULL; + struct list_head *node; + struct list_head *_tmp; + list_for_each_safe(node, _tmp, &ser_list) { + ser = list_entry(node, struct ser_device, node); + dev_close(ser->dev); + unregister_netdevice(ser->dev); + list_del(node); + } + tty_unregister_ldisc(N_CAIF); + debugfs_remove_recursive(debugfsdir); +} + +module_init(caif_ser_init); +module_exit(caif_ser_exit); diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index a2f29a38798..5f983487d6e 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -35,7 +35,6 @@ #include <linux/string.h> #include <linux/types.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> @@ -662,7 +661,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) at91_poll_err_frame(dev, cf, reg_sr); netif_receive_skb(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; @@ -899,7 +897,6 @@ static void at91_irq_err(struct net_device *dev) at91_irq_err_state(dev, cf, new_state); netif_rx(skb); - dev->last_rx = jiffies; dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; diff --git 
a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 866905fa411..d77264ad326 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -18,10 +18,10 @@ #include <linux/skbuff.h> #include <linux/platform_device.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <asm/bfin_can.h> #include <asm/portmux.h> #define DRV_NAME "bfin_can" @@ -29,90 +29,6 @@ #define TX_ECHO_SKB_MAX 1 /* - * transmit and receive channels - */ -#define TRANSMIT_CHL 24 -#define RECEIVE_STD_CHL 0 -#define RECEIVE_EXT_CHL 4 -#define RECEIVE_RTR_CHL 8 -#define RECEIVE_EXT_RTR_CHL 12 -#define MAX_CHL_NUMBER 32 - -/* - * bfin can registers layout - */ -struct bfin_can_mask_regs { - u16 aml; - u16 dummy1; - u16 amh; - u16 dummy2; -}; - -struct bfin_can_channel_regs { - u16 data[8]; - u16 dlc; - u16 dummy1; - u16 tsv; - u16 dummy2; - u16 id0; - u16 dummy3; - u16 id1; - u16 dummy4; -}; - -struct bfin_can_regs { - /* - * global control and status registers - */ - u16 mc1; /* offset 0 */ - u16 dummy1; - u16 md1; /* offset 4 */ - u16 rsv1[13]; - u16 mbtif1; /* offset 0x20 */ - u16 dummy2; - u16 mbrif1; /* offset 0x24 */ - u16 dummy3; - u16 mbim1; /* offset 0x28 */ - u16 rsv2[11]; - u16 mc2; /* offset 0x40 */ - u16 dummy4; - u16 md2; /* offset 0x44 */ - u16 dummy5; - u16 trs2; /* offset 0x48 */ - u16 rsv3[11]; - u16 mbtif2; /* offset 0x60 */ - u16 dummy6; - u16 mbrif2; /* offset 0x64 */ - u16 dummy7; - u16 mbim2; /* offset 0x68 */ - u16 rsv4[11]; - u16 clk; /* offset 0x80 */ - u16 dummy8; - u16 timing; /* offset 0x84 */ - u16 rsv5[3]; - u16 status; /* offset 0x8c */ - u16 dummy9; - u16 cec; /* offset 0x90 */ - u16 dummy10; - u16 gis; /* offset 0x94 */ - u16 dummy11; - u16 gim; /* offset 0x98 */ - u16 rsv6[3]; - u16 ctrl; /* offset 0xa0 */ - u16 dummy12; - u16 intr; /* offset 0xa4 */ - u16 rsv7[7]; - u16 esr; /* offset 0xb4 */ - u16 rsv8[37]; - - /* - * channel(mailbox) mask and message registers - */ - struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ - struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ -}; - -/* * bfin can private data */ struct bfin_can_priv { @@ -163,7 +79,7 @@ static int bfin_can_set_bittiming(struct net_device *dev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) timing |= SAM; - bfin_write16(®->clk, clk); + bfin_write16(®->clock, clk); bfin_write16(®->timing, timing); dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", @@ -185,11 +101,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev) bfin_write16(®->gim, 0); /* reset can and enter configuration mode */ - bfin_write16(®->ctrl, SRS | CCR); + bfin_write16(®->control, SRS | CCR); SSYNC(); - bfin_write16(®->ctrl, CCR); + bfin_write16(®->control, CCR); SSYNC(); - while (!(bfin_read16(®->ctrl) & CCA)) { + while (!(bfin_read16(®->control) & CCA)) { udelay(10); if (--timeout == 0) { dev_err(dev->dev.parent, @@ -244,7 +160,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev) /* * leave configuration mode */ - bfin_write16(®->ctrl, bfin_read16(®->ctrl) & ~CCR); + bfin_write16(®->control, bfin_read16(®->control) & ~CCR); while (bfin_read16(®->status) & CCA) { udelay(10); @@ -726,7 +642,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) if (netif_running(dev)) { /* enter sleep mode */ - bfin_write16(®->ctrl, bfin_read16(®->ctrl) | SMR); + bfin_write16(®->control, bfin_read16(®->control) | SMR); SSYNC(); while (!(bfin_read16(®->intr) & SMACK)) { udelay(10); diff --git a/drivers/net/can/dev.c 
b/drivers/net/can/dev.c index 904aa369f80..d0f8c7e67e7 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/can.h> diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f8cc168ec76..8431eb08075 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -58,7 +58,6 @@ * */ -#include <linux/can.h> #include <linux/can/core.h> #include <linux/can/dev.h> #include <linux/can/platform/mcp251x.h> @@ -73,6 +72,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> @@ -922,12 +922,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi) struct net_device *net; struct mcp251x_priv *priv; struct mcp251x_platform_data *pdata = spi->dev.platform_data; + int model = spi_get_device_id(spi)->driver_data; int ret = -ENODEV; if (!pdata) /* Platform data is required for osc freq */ goto error_out; + if (model) + pdata->model = model; + /* Allocate can/net device */ net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); if (!net) { @@ -1117,6 +1121,15 @@ static int mcp251x_can_resume(struct spi_device *spi) #define mcp251x_can_resume NULL #endif +static struct spi_device_id mcp251x_id_table[] = { + { "mcp251x", 0 /* Use pdata.model */ }, + { "mcp2510", CAN_MCP251X_MCP2510 }, + { "mcp2515", CAN_MCP251X_MCP2515 }, + { }, +}; + +MODULE_DEVICE_TABLE(spi, mcp251x_id_table); + static struct spi_driver mcp251x_can_driver = { .driver = { .name = DEVICE_NAME, @@ -1124,6 +1137,7 @@ static struct spi_driver mcp251x_can_driver = { .owner = THIS_MODULE, }, + .id_table = mcp251x_id_table, .probe = mcp251x_can_probe, .remove = __devexit_p(mcp251x_can_remove), .suspend = mcp251x_can_suspend, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 03e7c48465a..225fd147774 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -25,7 +25,6 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/netdevice.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 6b7dd578d41..64c378cd0c3 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -28,7 +28,6 @@ #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/list.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/io.h> diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 9e277d64a31..ae3505afd68 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -53,7 +53,9 @@ config CAN_PLX_PCI Driver supports now: - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/) - Adlink PCI-7841/cPCI-7841 SE card + - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/) + - esd CAN-PCI/PMC/266 + - esd CAN-PCIe/2000 - Marathon CAN-bus-PCI card (http://www.marathon.ru/) - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) - endif diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index 87300606abb..36f4f9780c3 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -22,8 +22,8 @@ #include 
<linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/pci.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/io.h> diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 441e776a7f5..ed004cebd31 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -36,7 +36,6 @@ #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/io.h> diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 6b46a6395f8..437b5c716a2 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -25,8 +25,8 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/pci.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/io.h> @@ -40,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " "Adlink PCI-7841/cPCI-7841 SE, " "Marathon CAN-bus-PCI, " - "TEWS TECHNOLOGIES TPMC810"); + "TEWS TECHNOLOGIES TPMC810, " + "esd CAN-PCI/CPCI/PCI104/200, " + "esd CAN-PCI/PMC/266, " + "esd CAN-PCIe/2000") MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 @@ -49,11 +52,14 @@ struct plx_pci_card { int channels; /* detected channels count */ struct net_device *net_dev[PLX_PCI_MAX_CHAN]; void __iomem *conf_addr; + + /* Pointer to device-dependent reset function */ + void (*reset_func)(struct pci_dev *pdev); }; #define PLX_PCI_CAN_CLOCK (16000000 / 2) -/* PLX90xx registers */ +/* PLX9030/9050/9052 registers */ #define PLX_INTCSR 0x4c /* Interrupt Control/Status */ #define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response, * Serial EEPROM, and Initialization @@ -65,6 +71,14 @@ struct plx_pci_card { #define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ #define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ +/* PLX9056 registers */ +#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */ +#define PLX9056_CNTRL 0x6c /* Control / Software Reset */ + +#define PLX9056_LINTI (1 << 11) +#define PLX9056_PCI_INT_EN (1 << 8) +#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */ + /* * The board configuration is probably following: * RX1 is connected to ground. 
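Aside on the device-table hunk below (a sketch, not the full table): the esd boards share their PLX bridge IDs with other products, so they are matched by PCI subsystem vendor/device ID, whereas the Marathon card is identified by the bridge device ID alone; either way the per-board plx_pci_card_info is carried in driver_data. A two-entry illustration of the contrast, reusing identifiers from this driver:

    /* Illustrative table only; the real plx_pci_tbl lists every board. */
    static DEFINE_PCI_DEVICE_TABLE(example_tbl) = {
        {   /* exact board: PLX9056 bridge narrowed by the esd subsystem ID */
            PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
            PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
            0, 0, (kernel_ulong_t)&plx_pci_card_info_esd266
        },
        {   /* any subsystem: the device ID alone identifies the card */
            PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
            PCI_ANY_ID, PCI_ANY_ID,
            0, 0, (kernel_ulong_t)&plx_pci_card_info_marathon
        },
        { 0, }
    };
    MODULE_DEVICE_TABLE(pci, example_tbl);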
@@ -100,6 +114,13 @@ struct plx_pci_card { #define ADLINK_PCI_VENDOR_ID 0x144A #define ADLINK_PCI_DEVICE_ID 0x7841 +#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004 +#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009 +#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e +#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b +#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200 +#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501 + #define MARATHON_PCI_DEVICE_ID 0x2715 #define TEWS_PCI_VENDOR_ID 0x1498 @@ -107,6 +128,7 @@ struct plx_pci_card { static void plx_pci_reset_common(struct pci_dev *pdev); static void plx_pci_reset_marathon(struct pci_dev *pdev); +static void plx9056_pci_reset_common(struct pci_dev *pdev); struct plx_pci_channel_map { u32 bar; @@ -147,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = { /* based on PLX9052 */ }; +static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = { + "esd CAN-PCI/CPCI/PCI104/200", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx_pci_reset_common + /* based on PLX9030/9050 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = { + "esd CAN-PCI/PMC/266", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx9056_pci_reset_common + /* based on PLX9056 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = { + "esd CAN-PCIe/2000", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx9056_pci_reset_common + /* based on PEX8311 */ +}; + static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = { "Marathon CAN-bus-PCI", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, @@ -179,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = { (kernel_ulong_t)&plx_pci_card_info_adlink_se }, { + /* esd CAN-PCI/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-CPCI/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-PCI104/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-PCI/266 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd266 + }, + { + /* esd CAN-PMC/266 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd266 + }, + { + /* esd CAN-PCIE/2000 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd2000 + }, + { /* Marathon CAN-bus-PCI card */ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -241,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) } /* - * PLX90xx software reset + * PLX9030/50/52 software reset * Also LRESET# asserts and brings to reset device on the Local Bus (if wired). * For most cards it's enough for reset the SJA1000 chips. 
*/ @@ -258,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev) iowrite32(cntrl, card->conf_addr + PLX_CNTRL); }; +/* + * PLX9056 software reset + * Assert LRESET# and reset device(s) on the Local Bus (if wired). + */ +static void plx9056_pci_reset_common(struct pci_dev *pdev) +{ + struct plx_pci_card *card = pci_get_drvdata(pdev); + u32 cntrl; + + /* issue a local bus reset */ + cntrl = ioread32(card->conf_addr + PLX9056_CNTRL); + cntrl |= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + udelay(100); + cntrl ^= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + + /* reload local configuration from EEPROM */ + cntrl |= PLX9056_PCI_RCR; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + + /* + * There is no safe way to poll for the end + * of reconfiguration process. Waiting for 10ms + * is safe. + */ + mdelay(10); + + cntrl ^= PLX9056_PCI_RCR; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); +}; + /* Special reset function for Marathon card */ static void plx_pci_reset_marathon(struct pci_dev *pdev) { @@ -301,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev) free_sja1000dev(dev); } - plx_pci_reset_common(pdev); + card->reset_func(pdev); /* - * Disable interrupts from PCI-card (PLX90xx) and disable Local_1, - * Local_2 interrupts + * Disable interrupts from PCI-card and disable local + * interrupts */ - iowrite32(0x0, card->conf_addr + PLX_INTCSR); + if (pdev->device != PCI_DEVICE_ID_PLX_9056) + iowrite32(0x0, card->conf_addr + PLX_INTCSR); + else + iowrite32(0x0, card->conf_addr + PLX9056_INTCSR); if (card->conf_addr) pci_iounmap(pdev, card->conf_addr); @@ -366,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, card->conf_addr = addr + ci->conf_map.offset; ci->reset_func(pdev); + card->reset_func = ci->reset_func; /* Detect available channels */ for (i = 0; i < ci->channel_count; i++) { @@ -437,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, * Enable interrupts from PCI-card (PLX90xx) and enable Local_1, * Local_2 interrupts from the SJA1000 chips */ - val = ioread32(card->conf_addr + PLX_INTCSR); - val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN; - iowrite32(val, card->conf_addr + PLX_INTCSR); - + if (pdev->device != PCI_DEVICE_ID_PLX_9056) { + val = ioread32(card->conf_addr + PLX_INTCSR); + if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH) + val |= PLX_LINT1_EN | PLX_PCI_INT_EN; + else + val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN; + iowrite32(val, card->conf_addr + PLX_INTCSR); + } else { + iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN, + card->conf_addr + PLX9056_INTCSR); + } return 0; failure_cleanup: diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 145b1a731a5..618c11222ab 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -60,7 +60,6 @@ #include <linux/skbuff.h> #include <linux/delay.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index a6a51f15596..496223e9e2f 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -23,7 +23,6 @@ #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 
9dd076a626a..34e79efbd2f 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -38,7 +38,6 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/of_platform.h> diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 628374c2a05..b65cabb361a 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -24,7 +24,6 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/irq.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> #include <linux/io.h> @@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); MODULE_LICENSE("GPL v2"); -static u8 sp_read_reg(const struct sja1000_priv *priv, int reg) +static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg); } -static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val) +static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg); } +static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg) +{ + return ioread8(priv->reg_base + reg * 2); +} + +static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val) +{ + iowrite8(val, priv->reg_base + reg * 2); +} + +static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg) +{ + return ioread8(priv->reg_base + reg * 4); +} + +static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val) +{ + iowrite8(val, priv->reg_base + reg * 4); +} + static int sp_probe(struct platform_device *pdev) { int err; @@ -90,14 +109,28 @@ static int sp_probe(struct platform_device *pdev) priv = netdev_priv(dev); dev->irq = res_irq->start; - priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); priv->reg_base = addr; - priv->read_reg = sp_read_reg; - priv->write_reg = sp_write_reg; priv->can.clock.freq = pdata->clock; priv->ocr = pdata->ocr; priv->cdr = pdata->cdr; + switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) { + case IORESOURCE_MEM_32BIT: + priv->read_reg = sp_read_reg32; + priv->write_reg = sp_write_reg32; + break; + case IORESOURCE_MEM_16BIT: + priv->read_reg = sp_read_reg16; + priv->write_reg = sp_write_reg16; + break; + case IORESOURCE_MEM_8BIT: + default: + priv->read_reg = sp_read_reg8; + priv->write_reg = sp_write_reg8; + break; + } + dev_set_drvdata(&pdev->dev, dev); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 0c3d2ba0d17..4d07f1ee716 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -47,7 +47,6 @@ #include <linux/platform_device.h> #include <linux/clk.h> -#include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/platform/ti_hecc.h> diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index d124d837ae5..a30b8f480f6 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -48,6 +48,7 @@ #include <linux/if_ether.h> #include <linux/can.h> #include <linux/can/dev.h> +#include <linux/slab.h> #include <net/rtnetlink.h> static __initdata const char banner[] = diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 9bd155e4111..bd857a20a75 
100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2957,20 +2957,20 @@ static void cas_process_mc_list(struct cas *cp) { u16 hash_table[16]; u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i = 1; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, cp->dev) { + netdev_for_each_mc_addr(ha, cp->dev) { if (i <= CAS_MC_EXACT_MATCH_SIZE) { /* use the alternate mac address registers for the * first 15 multicast addresses */ - writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], + writel((ha->addr[4] << 8) | ha->addr[5], cp->regs + REG_MAC_ADDRN(i*3 + 0)); - writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], + writel((ha->addr[2] << 8) | ha->addr[3], cp->regs + REG_MAC_ADDRN(i*3 + 1)); - writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], + writel((ha->addr[0] << 8) | ha->addr[1], cp->regs + REG_MAC_ADDRN(i*3 + 2)); i++; } @@ -2978,7 +2978,7 @@ static void cas_process_mc_list(struct cas *cp) /* use hw hash table for the next series of * multicast addresses */ - crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); + crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 2d11afe4531..036b2dfb1d4 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h @@ -51,6 +51,7 @@ #include <linux/mdio.h> #include <linux/crc32.h> #include <linux/init.h> +#include <linux/slab.h> #include <asm/io.h> #include <linux/pci_ids.h> diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c index a6eb30a6e2b..7dbb16d36ff 100644 --- a/drivers/net/chelsio/pm3393.c +++ b/drivers/net/chelsio/pm3393.c @@ -44,6 +44,7 @@ #include "suni1x10gexp_regs.h" #include <linux/crc32.h> +#include <linux/slab.h> #define OFFSET(REG_ADDR) ((REG_ADDR) << 2) @@ -376,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; } else if (t1_rx_mode_mc_cnt(rm)) { /* Accept one or more multicast(s). */ - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int bit; u16 mc_filter[4] = { 0, }; - netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) { - bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */ + netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) { + /* bit[23:28] */ + bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f; mc_filter[bit >> 4] |= 1 << (bit & 0xf); } pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 71384114a4e..f01cfdb995d 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -53,6 +53,7 @@ #include <linux/ip.h> #include <linux/in.h> #include <linux/if_arp.h> +#include <linux/slab.h> #include "cpl5_cmd.h" #include "sge.h" @@ -161,14 +162,14 @@ struct respQ_e { */ struct cmdQ_ce { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(dma_addr); - DECLARE_PCI_UNMAP_LEN(dma_len); + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); }; struct freelQ_ce { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(dma_addr); - DECLARE_PCI_UNMAP_LEN(dma_len); + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); }; /* @@ -248,7 +249,7 @@ static void restart_sched(unsigned long); * * Interrupts are handled by a single CPU and it is likely that on a MP system * the application is migrated to another CPU. 
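Aside (a sketch, not tied to any one driver): the sge.c hunks here, like the bnx2x ones earlier, convert the per-buffer unmap bookkeeping from the PCI-specific helpers to the generic DMA API equivalents. The idiom is a one-for-one rename:

    /* "ce" is a software descriptor that remembers a mapping so it can be
     * unmapped later; only the helper names change in this conversion. */
    struct example_ce {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);    /* was DECLARE_PCI_UNMAP_ADDR */
        DEFINE_DMA_UNMAP_LEN(dma_len);      /* was DECLARE_PCI_UNMAP_LEN  */
    };

    static void example_save(struct example_ce *ce, dma_addr_t mapping,
                             unsigned int len)
    {
        dma_unmap_addr_set(ce, dma_addr, mapping);  /* was pci_unmap_addr_set */
        dma_unmap_len_set(ce, dma_len, len);        /* was pci_unmap_len_set  */
    }

    static void example_unmap(struct pci_dev *pdev, struct example_ce *ce)
    {
        dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
                         dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
    }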
In that scenario, we try to - * seperate the RX(in irq context) and TX state in order to decrease memory + * separate the RX(in irq context) and TX state in order to decrease memory * contention. */ struct sge { @@ -459,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, again: for (i = 0; i < MAX_NPORTS; i++) { - s->port = ++s->port & (MAX_NPORTS - 1); + s->port = (s->port + 1) & (MAX_NPORTS - 1); skbq = &s->p[s->port].skbq; skb = skb_peek(skbq); @@ -517,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) while (q->credits--) { struct freelQ_ce *ce = &q->centries[cidx]; - pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); dev_kfree_skb(ce->skb); ce->skb = NULL; @@ -632,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) q->in_use -= n; ce = &q->centries[cidx]; while (n--) { - if (likely(pci_unmap_len(ce, dma_len))) { - pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), + if (likely(dma_unmap_len(ce, dma_len))) { + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_TODEVICE); if (q->sop) q->sop = 0; @@ -850,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q) skb_reserve(skb, sge->rx_pkt_pad); ce->skb = skb; - pci_unmap_addr_set(ce, dma_addr, mapping); - pci_unmap_len_set(ce, dma_len, dma_len); + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, dma_len); e->addr_lo = (u32)mapping; e->addr_hi = (u64)mapping >> 32; e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); @@ -1058,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev, skb_reserve(skb, 2); /* align IP header */ skb_put(skb, len); pci_dma_sync_single_for_cpu(pdev, - pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), + dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(ce->skb, skb->data, len); pci_dma_sync_single_for_device(pdev, - pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), + dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); recycle_fl_buf(fl, fl->cidx); return skb; @@ -1076,8 +1077,8 @@ use_orig_buf: return NULL; } - pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); skb = ce->skb; prefetch(skb->data); @@ -1099,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) struct freelQ_ce *ce = &fl->centries[fl->cidx]; struct sk_buff *skb = ce->skb; - pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), - pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); pr_err("%s: unexpected offload packet, cmd %u\n", adapter->name, *skb->data); recycle_fl_buf(fl, fl->cidx); @@ -1122,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { unsigned int nfrags = skb_shinfo(skb)->nr_frags; - unsigned int i, len = skb->len - skb->data_len; + unsigned int i, len = skb_headlen(skb); while (len > SGE_TX_DESC_MAX_PLEN) { count++; len -= SGE_TX_DESC_MAX_PLEN; @@ 
-1181,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx, write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, *gen, nfrags == 0 && *desc_len == 0); ce1->skb = NULL; - pci_unmap_len_set(ce1, dma_len, 0); + dma_unmap_len_set(ce1, dma_len, 0); *desc_mapping += SGE_TX_DESC_MAX_PLEN; if (*desc_len) { ce1++; @@ -1218,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, ce = &q->centries[pidx]; mapping = pci_map_single(adapter->pdev, skb->data, - skb->len - skb->data_len, PCI_DMA_TODEVICE); + skb_headlen(skb), PCI_DMA_TODEVICE); desc_mapping = mapping; - desc_len = skb->len - skb->data_len; + desc_len = skb_headlen(skb); flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | @@ -1232,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, e->addr_hi = (u64)desc_mapping >> 32; e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); ce->skb = NULL; - pci_unmap_len_set(ce, dma_len, 0); + dma_unmap_len_set(ce, dma_len, 0); if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && desc_len > SGE_TX_DESC_MAX_PLEN) { @@ -1256,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, } ce->skb = NULL; - pci_unmap_addr_set(ce, dma_addr, mapping); - pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); for (i = 0; nfrags--; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -1283,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, write_tx_desc(e1, desc_mapping, desc_len, gen, nfrags == 0); ce->skb = NULL; - pci_unmap_addr_set(ce, dma_addr, mapping); - pci_unmap_len_set(ce, dma_len, frag->size); + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, frag->size); } ce->skb = skb; wmb(); diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 9781942992e..4b451a7c03e 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk) struct cnic_local *cp = dev->cnic_priv; u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; - prefetch(cp->status_blk.bnx2x); - prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + prefetch(cp->status_blk.bnx2x); + prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) tasklet_schedule(&cp->cnic_irq_task); - - cnic_chk_pkt_rings(cp); + cnic_chk_pkt_rings(cp); + } return 0; } diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 60777fd90b3..bdfff784645 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map) static void cpmac_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *iter; + struct netdev_hw_addr *ha; u8 tmp; u32 mbp, bit, hash[2] = { 0, }; struct cpmac_priv *priv = netdev_priv(dev); @@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev) * cpmac uses some strange mac address hashing * (not crc32) */ - netdev_for_each_mc_addr(iter, dev) { + netdev_for_each_mc_addr(ha, dev) { bit = 0; - tmp = iter->dmi_addr[0]; + tmp = ha->addr[0]; bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = iter->dmi_addr[1]; + tmp = ha->addr[1]; bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = iter->dmi_addr[2]; + tmp = ha->addr[2]; bit ^= (tmp >> 6) ^ tmp; - tmp = 
iter->dmi_addr[3]; + tmp = ha->addr[3]; bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = iter->dmi_addr[4]; + tmp = ha->addr[4]; bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = iter->dmi_addr[5]; + tmp = ha->addr[5]; bit ^= (tmp >> 6) ^ tmp; bit &= 0x3f; hash[bit / 32] |= 1 << (bit % 32); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index dd24aadb778..f49ad8ed9b0 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -18,7 +18,6 @@ #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/errno.h> @@ -1596,16 +1595,16 @@ set_multicast_list(struct net_device *dev) } else { /* MC mode, receive normal and MC packets */ char hash_ix; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *baddr; lo_bits = 0x00000000ul; hi_bits = 0x00000000ul; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Calculate the hash index for the GA registers */ hash_ix = 0; - baddr = dmi->dmi_addr; + baddr = ha->addr; hash_ix ^= (*baddr) & 0x3f; hash_ix ^= ((*baddr) >> 6) & 0x03; ++baddr; diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 14624019ce7..4c38491b8ef 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -138,12 +138,12 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/delay.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> @@ -580,7 +580,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) } #ifdef CONFIG_SH_HICOSH4 - /* truely reset the chip */ + /* truly reset the chip */ writeword(ioaddr, ADD_PORT, 0x0114); writeword(ioaddr, DATA_PORT, 0x0040); #endif diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 3e8618b4efb..4cd7f420766 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -264,6 +264,10 @@ struct adapter { struct work_struct fatal_error_handler_task; struct work_struct link_fault_handler_task; + struct work_struct db_full_task; + struct work_struct db_empty_task; + struct work_struct db_drop_task; + struct dentry *debugfs_root; struct mutex mdio_lock; @@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, unsigned char *data); irqreturn_t t3_sge_intr_msix(int irq, void *cookie); +extern struct workqueue_struct *cxgb3_wq; int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index cecdec1551d..aced6c5e635 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -45,6 +45,8 @@ #include <linux/firmware.h> #include <linux/log2.h> #include <linux/stringify.h> +#include <linux/sched.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include "common.h" @@ -140,7 +142,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not"); * will block keventd as it needs the rtnl lock, and we'll deadlock waiting * for our work to complete. Get our own work queue to solve this. 
*/ -static struct workqueue_struct *cxgb3_wq; +struct workqueue_struct *cxgb3_wq; /** * link_report - show link status and link speed/duplex @@ -586,6 +588,19 @@ static void setup_rss(struct adapter *adap) V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); } +static void ring_dbs(struct adapter *adap) +{ + int i, j; + + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; + + if (qs->adap) + for (j = 0; j < SGE_TXQ_PER_SET; j++) + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); + } +} + static void init_napi(struct adapter *adap) { int i; @@ -2751,6 +2766,42 @@ static void t3_adap_check_task(struct work_struct *work) spin_unlock_irq(&adapter->work_lock); } +static void db_full_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_full_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0); +} + +static void db_empty_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_empty_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0); +} + +static void db_drop_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_drop_task); + unsigned long delay = 1000; + unsigned short r; + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0); + + /* + * Sleep a while before ringing the driver qset dbs. + * The delay is between 1000-2023 usecs. + */ + get_random_bytes(&r, 2); + delay += r & 1023; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(delay)); + ring_dbs(adapter); +} + /* * Processes external (PHY) interrupts in process context. */ @@ -3219,6 +3270,11 @@ static int __devinit init_one(struct pci_dev *pdev, INIT_LIST_HEAD(&adapter->adapter_list); INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); + + INIT_WORK(&adapter->db_full_task, db_full_task); + INIT_WORK(&adapter->db_empty_task, db_empty_task); + INIT_WORK(&adapter->db_drop_task, db_drop_task); + INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); for (i = 0; i < ai->nports0 + ai->nports1; ++i) { diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 9498361119d..c6485b39eb0 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -31,6 +31,7 @@ */ #include <linux/list.h> +#include <linux/slab.h> #include <net/neighbour.h> #include <linux/notifier.h> #include <asm/atomic.h> diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h index 670aa62042d..929c298115c 100644 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ b/drivers/net/cxgb3/cxgb3_offload.h @@ -73,7 +73,10 @@ enum { OFFLOAD_STATUS_UP, OFFLOAD_STATUS_DOWN, OFFLOAD_PORT_DOWN, - OFFLOAD_PORT_UP + OFFLOAD_PORT_UP, + OFFLOAD_DB_FULL, + OFFLOAD_DB_EMPTY, + OFFLOAD_DB_DROP }; struct cxgb3_client { diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index ff1611f90e7..2f3ee721c3e 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c @@ -34,6 +34,7 @@ #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/jhash.h> +#include <linux/slab.h> #include <net/neighbour.h> #include "common.h" #include "t3cdev.h" diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index 1b5327b5a96..cb42353c9fd 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -254,6 +254,22 @@ #define V_LOPIODRBDROPERR(x) ((x) << 
S_LOPIODRBDROPERR) #define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) +#define S_HIPRIORITYDBFULL 7 +#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL) +#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U) + +#define S_HIPRIORITYDBEMPTY 6 +#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY) +#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U) + +#define S_LOPRIORITYDBFULL 5 +#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL) +#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U) + +#define S_LOPRIORITYDBEMPTY 4 +#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY) +#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U) + #define S_RSPQDISABLED 3 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) #define F_RSPQDISABLED V_RSPQDISABLED(1U) diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 04820590374..5962b911b5b 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -36,12 +36,14 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <net/arp.h> #include "common.h" #include "regs.h" #include "sge_defs.h" #include "t3_cpl.h" #include "firmware_exports.h" +#include "cxgb3_offload.h" #define USE_GTS 0 @@ -116,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */ struct sk_buff *skb; struct fl_pg_chunk pg_chunk; }; - DECLARE_PCI_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_ADDR(dma_addr); }; struct rsp_desc { /* response queue descriptor */ @@ -196,17 +198,17 @@ static inline void refill_rspq(struct adapter *adapter, /** * need_skb_unmap - does the platform need unmapping of sk_buffs? * - * Returns true if the platfrom needs sk_buff unmapping. The compiler + * Returns true if the platform needs sk_buff unmapping. The compiler * optimizes away unecessary code if this returns true. */ static inline int need_skb_unmap(void) { /* - * This structure is used to tell if the platfrom needs buffer + * This structure is used to tell if the platform needs buffer * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. 
*/ struct dummy { - DECLARE_PCI_UNMAP_ADDR(addr); + DEFINE_DMA_UNMAP_ADDR(addr); }; return sizeof(struct dummy) != 0; @@ -361,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { - pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), + pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), q->buf_size, PCI_DMA_FROMDEVICE); kfree_skb(d->skb); d->skb = NULL; @@ -417,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, if (unlikely(pci_dma_mapping_error(pdev, mapping))) return -ENOMEM; - pci_unmap_addr_set(sd, dma_addr, mapping); + dma_unmap_addr_set(sd, dma_addr, mapping); d->addr_lo = cpu_to_be32(mapping); d->addr_hi = cpu_to_be32((u64) mapping >> 32); @@ -513,7 +515,7 @@ nomem: q->alloc_failed++; break; } mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; - pci_unmap_addr_set(sd, dma_addr, mapping); + dma_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); pci_dma_sync_single_for_device(adap->pdev, mapping, @@ -789,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, if (likely(skb != NULL)) { __skb_put(skb, len); pci_dma_sync_single_for_cpu(adap->pdev, - pci_unmap_addr(sd, dma_addr), len, + dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); memcpy(skb->data, sd->skb->data, len); pci_dma_sync_single_for_device(adap->pdev, - pci_unmap_addr(sd, dma_addr), len, + dma_unmap_addr(sd, dma_addr), len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) goto use_orig_buf; @@ -808,7 +810,7 @@ recycle: goto recycle; use_orig_buf: - pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), + pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), fl->buf_size, PCI_DMA_FROMDEVICE); skb = sd->skb; skb_put(skb, len); @@ -841,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, struct sk_buff *newskb, *skb; struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; - dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr); + dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); newskb = skb = q->pg_skb; if (!skb && (len <= SGE_RX_COPY_THRES)) { @@ -2095,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, fl->credits--; pci_dma_sync_single_for_cpu(adap->pdev, - pci_unmap_addr(sd, dma_addr), + dma_unmap_addr(sd, dma_addr), fl->buf_size - SGE_PG_RSVD, PCI_DMA_FROMDEVICE); @@ -2841,8 +2843,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter) } if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) - CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", - status & F_HIPIODRBDROPERR ? 
"high" : "lo"); + queue_work(cxgb3_wq, &adapter->db_drop_task); + + if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) + queue_work(cxgb3_wq, &adapter->db_full_task); + + if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) + queue_work(cxgb3_wq, &adapter->db_empty_task); t3_write_reg(adapter, A_SG_INT_CAUSE, status); if (status & SGE_FATALERR) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 3ab9f51918a..95a8ba0759f 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ - F_HIRCQPARITYERROR) + F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \ + F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \ + F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \ + F_LOPIODRBDROPERR) #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ F_NFASRCHFAIL) diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index c142a2132e9..3af19a55037 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev) if (dev->flags & IFF_ALLMULTI) hash_lo = hash_hi = 0xffffffff; else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int exact_addr_idx = mac->nucast; hash_lo = hash_hi = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) if (exact_addr_idx < EXACT_ADDR_FILTERS) set_addr_filter(mac, exact_addr_idx++, - dmi->dmi_addr); + ha->addr); else { - int hash = hash_hw_addr(dmi->dmi_addr); + int hash = hash_hw_addr(ha->addr); if (hash < 32) hash_lo |= (1 << hash); diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile new file mode 100644 index 00000000000..498667487f5 --- /dev/null +++ b/drivers/net/cxgb4/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 driver +# + +obj-$(CONFIG_CHELSIO_T4) += cxgb4.o + +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h new file mode 100644 index 00000000000..3d8ff4889b5 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4.h @@ -0,0 +1,741 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_H__ +#define __CXGB4_H__ + +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <asm/io.h> +#include "cxgb4_uld.h" +#include "t4_hw.h" + +#define FW_VERSION_MAJOR 1 +#define FW_VERSION_MINOR 1 +#define FW_VERSION_MICRO 0 + +enum { + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 16, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ +}; + +enum { + MEM_EDC0, + MEM_EDC1, + MEM_MC +}; + +enum dev_master { + MASTER_CANT, + MASTER_MAY, + MASTER_MUST +}; + +enum dev_state { + DEV_STATE_UNINIT, + DEV_STATE_INIT, + DEV_STATE_ERR +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 
overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct lb_port_stats { + u64 octets; + u64 frames; + u64 bcast_frames; + u64 mcast_frames; + u64 ucast_frames; + u64 error_frames; + + u64 frames_64; + u64 frames_65_127; + u64 frames_128_255; + u64 frames_256_511; + u64 frames_512_1023; + u64 frames_1024_1518; + u64 frames_1519_max; + + u64 drop; + + u64 ovflow0; + u64 ovflow1; + u64 ovflow2; + u64 ovflow3; + u64 trunc0; + u64 trunc1; + u64 trunc2; + u64 trunc3; +}; + +struct tp_tcp_stats { + u32 tcpOutRsts; + u64 tcpInSegs; + u64 tcpOutSegs; + u64 tcpRetransSegs; +}; + +struct tp_err_stats { + u32 macInErrs[4]; + u32 hdrInErrs[4]; + u32 tcpInErrs[4]; + u32 tnlCongDrops[4]; + u32 ofldChanDrops[4]; + u32 tnlTxDrops[4]; + u32 ofldVlanDrops[4]; + u32 tcp6InErrs[4]; + u32 ofldNoNeigh; + u32 ofldCongDefer; +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ +}; + +struct vpd_params { + unsigned int cclk; + u8 ec[EC_LEN + 1]; + u8 sn[SERNUM_LEN + 1]; + u8 id[ID_LEN + 1]; +}; + +struct pci_params { + unsigned char speed; + unsigned char width; +}; + +struct adapter_params { + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + unsigned int fw_vers; + unsigned int tp_vers; + u8 api_vers[7]; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + unsigned char rev; /* chip revision */ + unsigned char offload; + + unsigned int ofldq_wr_cred; +}; + +struct trace_params { + u32 data[TRACE_LEN / 4]; + u32 mask[TRACE_LEN / 4]; + unsigned short snap_len; + unsigned short min_len; + unsigned char skip_ofst; + unsigned char skip_len; + unsigned char invert; + unsigned char port; +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? 
*/ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +enum { + MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ + MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ +}; + +enum { + MAX_EGRQ = 128, /* max # of egress queues, including FLs */ + MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ +}; + +struct adapter; +struct vlan_group; +struct sge_rspq; + +struct port_info { + struct adapter *adapter; + struct vlan_group *vlan_grp; + u16 viid; + s16 xact_addr_filt; /* index of exact MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + s8 mdio_addr; + u8 port_type; + u8 mod_type; + u8 port_id; + u8 tx_chan; + u8 lport; /* associated offload logical port */ + u8 rx_offload; /* CSO, etc */ + u8 nqsets; /* # of qsets */ + u8 first_qset; /* index of first qset */ + struct link_config link_cfg; +}; + +/* port_info.rx_offload flags */ +enum { + RX_CSO = 1 << 0, +}; + +struct dentry; +struct work_struct; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + QUEUES_BOUND = (1 << 3), + FW_OK = (1 << 4), +}; + +struct rx_sw_desc; + +struct sge_fl { /* SGE free-buffer queue state */ + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long large_alloc_failed; + unsigned long starving; + /* RO fields */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + __be64 *desc; /* address of HW Rx descriptor ring */ + dma_addr_t addr; /* bus address of HW ring start */ +}; + +/* A packet gather list */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct napi_struct napi; + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 uld; /* ULD handling this queue */ + u8 idx; /* queue index within its group */ + int offset; /* offset into current Rx buffer */ + u16 cntxt_id; /* SGE context id for the response q */ + u16 abs_id; /* absolute SGE id for the response q */ + __be64 *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + struct adapter *adap; + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; +}; + +struct sge_eth_stats { /* Ethernet queue statistics */ + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ 
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_stats stats; +} ____cacheline_aligned_in_smp; + +struct sge_ofld_stats { /* offload queue statistics */ + unsigned long pkts; /* # of packets */ + unsigned long imm; /* # of immediate-data packets */ + unsigned long an; /* # of asynchronous notifications */ + unsigned long nomem; /* # of responses deferred due to no mem */ +}; + +struct sge_ofld_rxq { /* SW offload Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_ofld_stats stats; +} ____cacheline_aligned_in_smp; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc; + +struct sge_txq { + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* physical address of the ring */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ofld_txq { /* state for an SGE offload Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ctrl_txq { /* state for an SGE control Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ +} ____cacheline_aligned_in_smp; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 ofldqsets; /* # of active offload queue sets */ + u16 rdmaqs; /* # of available RDMA Rx queues */ + u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 rdma_rxq[NCHAN]; + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + unsigned int starve_thres; + u8 idma_state[2]; + void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ + struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + 
DECLARE_BITMAP(txq_maperr, MAX_EGRQ); + struct timer_list rx_timer; /* refills starving FLs */ + struct timer_list tx_timer; /* checks Tx queues */ +}; + +#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) +#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) + +struct l2t_data; + +struct adapter { + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + + struct adapter_params params; + struct cxgb4_virt_res vres; + unsigned int swintr; + + unsigned int wol; + + struct { + unsigned short vec; + char desc[14]; + } msix_info[MAX_INGQ + 1]; + + struct sge sge; + + struct net_device *port[MAX_NPORTS]; + u8 chan_map[NCHAN]; /* channel -> port map */ + + struct l2t_data *l2t; + void *uld_handle[CXGB4_ULD_MAX]; + struct list_head list_node; + + struct tid_info tids; + void **tid_release_head; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + bool tid_release_task_busy; + + struct dentry *debugfs_root; + + spinlock_t stats_lock; +}; + +static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) +{ + return readl(adap->regs + reg_addr); +} + +static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) +{ + writel(val, adap->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) +{ + return readq(adap->regs + reg_addr); +} + +static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) +{ + writeq(val, adap->regs + reg_addr); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. 
+ */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); + +void t4_free_sge_resources(struct adapter *adap); +irq_handler_t t4_intr_handler(struct adapter *adap); +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid); +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid); +irqreturn_t t4_sge_intr_msix(int irq, void *cookie); +void t4_sge_init(struct adapter *adap); +void t4_sge_start(struct adapter *adap); +void t4_sge_stop(struct adapter *adap); + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +void t4_intr_clear(struct adapter *adapter); +int t4_slow_intr_handler(struct adapter *adapter); + +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); +int t4_seeprom_wp(struct adapter *adapter, bool enable); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_check_fw_version(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +void t4_fatal_err(struct adapter *adapter); +void 
t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); +int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, + int filter_index, int enable); +void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, + int filter_index, int *enabled); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags); +int t4_read_rss(struct adapter *adapter, u16 *entries); +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); + +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); + +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr); +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable); + +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_early_init(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok); +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks); +int t4_mdio_rd(struct 
adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp); +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CXGB4_H__ */ diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c new file mode 100644 index 00000000000..5f582dba928 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -0,0 +1,3388 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitmap.h> +#include <linux/crc32.h> +#include <linux/ctype.h> +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/log2.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/rtnetlink.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/sockios.h> +#include <linux/vmalloc.h> +#include <linux/workqueue.h> +#include <net/neighbour.h> +#include <net/netevent.h> +#include <asm/uaccess.h> + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "l2t.h" + +#define DRV_VERSION "1.0.0-ko" +#define DRV_DESC "Chelsio T4 Network Driver" + +/* + * Max interrupt hold-off timer value in us. Queues fall back to this value + * under extreme memory pressure so it's largish to give the system time to + * recover. + */ +#define MAX_SGE_TIMERVAL 200U + +enum { + MEMWIN0_APERTURE = 65536, + MEMWIN0_BASE = 0x30000, + MEMWIN1_APERTURE = 32768, + MEMWIN1_BASE = 0x28000, + MEMWIN2_APERTURE = 2048, + MEMWIN2_BASE = 0x1b800, +}; + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } + +static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { + CH_DEVICE(0xa000), /* PE10K */ + { 0, } +}; + +#define FW_FNAME "cxgb4/t4fw.bin" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); +MODULE_FIRMWARE(FW_FNAME); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which + * of these schemes the driver may consider as follows: + * + * msi = 2: choose from among all three options + * msi = 1: only consider MSI and INTx interrupts + * msi = 0: force INTx interrupts + */ +static int msi = 2; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); + +/* + * Queue interrupt hold-off timer values. Queues default to the first of these + * upon creation. 
+ */ +static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; + +module_param_array(intr_holdoff, uint, NULL, 0644); +MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " + "0..4 in microseconds"); + +static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; + +module_param_array(intr_cnt, uint, NULL, 0644); +MODULE_PARM_DESC(intr_cnt, + "thresholds 1..3 for queue interrupt packet counters"); + +static int vf_acls; + +#ifdef CONFIG_PCI_IOV +module_param(vf_acls, bool, 0644); +MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); + +static unsigned int num_vf[4]; + +module_param_array(num_vf, uint, NULL, 0644); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +#endif + +static struct dentry *cxgb4_debugfs_root; + +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(uld_mutex); +static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; +static const char *uld_str[] = { "RDMA", "iSCSI" }; + +static void link_report(struct net_device *dev) +{ + if (!netif_carrier_ok(dev)) + netdev_info(dev, "link down\n"); + else { + static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; + + const char *s = "10Mbps"; + const struct port_info *p = netdev_priv(dev); + + switch (p->link_cfg.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + case SPEED_1000: + s = "1000Mbps"; + break; + case SPEED_100: + s = "100Mbps"; + break; + } + + netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, + fc[p->link_cfg.fc]); + } +} + +void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) +{ + struct net_device *dev = adapter->port[port_id]; + + /* Skip changes from disabled ports. */ + if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { + if (link_stat) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + link_report(dev); + } +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + netdev_info(dev, "port module unplugged\n"); + else + netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); +} + +/* + * Configure the exact and hash address filters to handle a port's multicast + * and secondary unicast MAC addresses. 
+ */ +static int set_addr_filters(const struct net_device *dev, bool sleep) +{ + u64 mhash = 0; + u64 uhash = 0; + bool free = true; + u16 filt_idx[7]; + const u8 *addr[7]; + int ret, naddr = 0; + const struct netdev_hw_addr *ha; + int uc_cnt = netdev_uc_count(dev); + int mc_cnt = netdev_mc_count(dev); + const struct port_info *pi = netdev_priv(dev); + + /* first do the secondary unicast addresses */ + netdev_for_each_uc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &uhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + /* next set up the multicast addresses */ + netdev_for_each_mc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &mhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, + uhash | mhash, sleep); +} + +/* + * Set Rx properties of a port, such as promiscuity, address filters, and MTU. + * If @mtu is -1 it is left unchanged. + */ +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + ret = set_addr_filters(dev, sleep_ok); + if (ret == 0) + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, + sleep_ok); + return ret; +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, + true); + if (ret == 0) { + ret = t4_change_mac(pi->adapter, 0, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true, + false); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0) + ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); + if (ret == 0) + ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); + return ret; +} + +/* + * Response queue handler for the FW event queue. 
+ */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + const struct cpl_sge_egr_update *p = (void *)rsp; + unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); + struct sge_txq *txq = q->adap->sge.egr_map[qid]; + + txq->restarts++; + if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { + struct sge_eth_txq *eq; + + eq = container_of(txq, struct sge_eth_txq, q); + netif_tx_wake_queue(eq->txq); + } else { + struct sge_ofld_txq *oq; + + oq = container_of(txq, struct sge_ofld_txq, q); + tasklet_schedule(&oq->qresume_tsk); + } + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *p = (void *)rsp; + + if (p->type == 0) + t4_handle_fw_rpl(q->adap, p->data); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (void *)rsp; + + do_l2t_write_rpl(q->adap, p); + } else + dev_err(q->adap->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + return 0; +} + +/** + * uldrx_handler - response queue handler for ULD queues + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the offload message + * @gl: the gather list of packet fragments + * + * Deliver an ingress offload packet to a ULD. All processing is done by + * the ULD, we just maintain statistics. + */ +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { + rxq->stats.nomem++; + return -1; + } + if (gl == NULL) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static void disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for non-data events used with MSI-X. + */ +static irqreturn_t t4_nondata_intr(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); + if (v & PFSW) { + adap->swintr = 1; + t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); + } + t4_slow_intr_handler(adap); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. 
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; + + /* non-data interrupts */ + snprintf(adap->msix_info[0].desc, n, "%s", adap->name); + adap->msix_info[0].desc[n] = 0; + + /* FW events */ + snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); + adap->msix_info[1].desc[n] = 0; + + /* Ethernet queues */ + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", + d->name, i); + adap->msix_info[msi_idx].desc[n] = 0; + } + } + + /* offload queues */ + for_each_ofldrxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } + for_each_rdmarxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } +} + +static int request_msix_queue_irqs(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + + err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, + adap->msix_info[1].desc, &s->fw_evtq); + if (err) + return err; + + for_each_ethrxq(s, ethqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ethrxq[ethqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_ofldrxq(s, ofldqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ofldrxq[ofldqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_rdmarxq(s, rdmaqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->rdmarxq[rdmaqidx].rspq); + if (err) + goto unwind; + msi++; + } + return 0; + +unwind: + while (--rdmaqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->rdmarxq[rdmaqidx].rspq); + while (--ofldqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->ofldrxq[ofldqidx].rspq); + while (--ethqidx >= 0) + free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + return err; +} + +static void free_msix_queue_irqs(struct adapter *adap) +{ + int i, msi = 2; + struct sge *s = &adap->sge; + + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + for_each_ethrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + for_each_ofldrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + for_each_rdmarxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for all ports since the mapping + * table has plenty of entries. 
+ */ +static int setup_rss(struct adapter *adap) +{ + int i, j, err; + u16 rss[MAX_ETH_QSETS]; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++) + rss[j] = q[j].rspq.abs_id; + + err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, + rss, pi->nqsets); + if (err) + return err; + } + return 0; +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (q && q->handler) + napi_disable(&q->napi); + } +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (!q) + continue; + if (q->handler) + napi_enable(&q->napi); + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + SEINTARM(q->intr_params) | + INGRESSQID(q->cntxt_id)); + } +} + +/** + * setup_sge_queues - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adap) +{ + int err, msi_idx, i, j; + struct sge *s = &adap->sge; + + bitmap_zero(s->starving_fl, MAX_EGRQ); + bitmap_zero(s->txq_maperr, MAX_EGRQ); + + if (adap->flags & USING_MSIX) + msi_idx = 1; /* vector 0 is for non-queue interrupts */ + else { + err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, + NULL, NULL); + if (err) + return err; + msi_idx = -((int)s->intrq.abs_id + 1); + } + + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + msi_idx, NULL, fwevtq_handler); + if (err) { +freeout: t4_free_sge_resources(adap); + return err; + } + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, + msi_idx, &q->fl, + t4_ethrx_handler); + if (err) + goto freeout; + q->rspq.idx = j; + memset(&q->stats, 0, sizeof(q->stats)); + } + for (j = 0; j < pi->nqsets; j++, t++) { + err = t4_sge_alloc_eth_txq(adap, t, dev, + netdev_get_tx_queue(dev, j), + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + } + + j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ + for_each_ofldrxq(s, i) { + struct sge_ofld_rxq *q = &s->ofldrxq[i]; + struct net_device *dev = adap->port[i / j]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, + &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); + s->ofld_rxq[i] = q->rspq.abs_id; + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + + for_each_rdmarxq(s, i) { + struct sge_ofld_rxq *q = &s->rdmarxq[i]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], + msi_idx, &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); 
+ s->rdma_rxq[i] = q->rspq.abs_id; + } + + for_each_port(adap, i) { + /* + * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't + * have RDMA queues, and that's the right value. + */ + err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], + s->fw_evtq.cntxt_id, + s->rdmarxq[i].rspq.cntxt_id); + if (err) + goto freeout; + } + + t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | + QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); + return 0; +} + +/* + * Returns 0 if new FW was successfully loaded, a positive errno if a load was + * started but failed, and a negative errno if flash load couldn't start. + */ +static int upgrade_fw(struct adapter *adap) +{ + int ret; + u32 vers; + const struct fw_hdr *hdr; + const struct firmware *fw; + struct device *dev = adap->pdev_dev; + + ret = request_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "unable to load firmware image " FW_FNAME + ", error %d\n", ret); + return ret; + } + + hdr = (const struct fw_hdr *)fw->data; + vers = ntohl(hdr->fw_ver); + if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { + ret = -EINVAL; /* wrong major version, won't do */ + goto out; + } + + /* + * If the flash FW is unusable or we found something newer, load it. + */ + if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || + vers > adap->params.fw_vers) { + ret = -t4_load_fw(adap, fw->data, fw->size); + if (!ret) + dev_info(dev, "firmware upgraded to version %pI4 from " + FW_FNAME "\n", &hdr->fw_ver); + } +out: release_firmware(fw); + return ret; +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + void *p = kmalloc(size, GFP_KERNEL); + + if (!p) + p = vmalloc(size); + if (p) + memset(p, 0, size); + return p; +} + +/* + * Free memory allocated through alloc_mem(). + */ +void t4_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +/* + * Implementation of ethtool operations. 
+ */ + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T4_REGMAP_SIZE (160 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T4_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +/* + * port stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. 
+ */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + */ +static inline unsigned int mk_adap_vers(const struct adapter *ap) +{ + return 4 | (ap->params.rev << 10); +} + +static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, + unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = t4_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + static const unsigned int reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11190, + 0x19040, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e240, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e640, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea40, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee40, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f240, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f640, 0x1f68c, + 0x1f6c0, 
0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa40, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe40, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + int i; + struct adapter *ap = netdev2adap(dev); + + regs->version = mk_adap_vers(ap); + + memset(buf, 0, T4_REGMAP_SIZE); + for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, 0, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, u32 data) +{ + if (data == 0) + data = 2; /* default to 2 seconds */ + + return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, + data * 5); +} + +static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & 
FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + else if (type == FW_PORT_TYPE_FIBER) + v |= SUPPORTED_FIBRE; + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XAUI) + cmd->port = PORT_TP; + else if (p->port_type == FW_PORT_TYPE_FIBER) + cmd->port = PORT_FIBRE; + else if (p->port_type == FW_PORT_TYPE_TWINAX) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == SPEED_100) + return FW_PORT_CAP_SPEED_100M; + if (speed == SPEED_1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == SPEED_10000) + return FW_PORT_CAP_SPEED_10G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* + * PHY offers a single speed. See if that's what's + * being requested. 
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(cmd->speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(cmd->speed); + + if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || + cmd->speed == SPEED_10000) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static u32 get_rx_csum(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + return p->rx_offload & RX_CSO; +} + +static int set_rx_csum(struct net_device *dev, u32 data) +{ + struct port_info *p = netdev_priv(dev); + + if (data) + p->rx_offload |= RX_CSO; + else + p->rx_offload &= ~RX_CSO; + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +static int closest_timer(const struct sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const 
struct sge *s, int thres) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/* + * Return a queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adap: the adapter + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt) +{ + if ((us | cnt) == 0) + cnt = 1; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + us = us == 0 ? 6 : closest_timer(&adap->sge, us); + q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, + c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + return 0; +} + +/* + * Translate a physical EEPROM address to virtual. The first 1K is accessed + * through virtual addresses starting at 31K, the rest is accessed through + * virtual addresses starting at 0. This mapping is correct only for PF0. + */ +static int eeprom_ptov(unsigned int phys_addr) +{ + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024; + return -EINVAL; +} + +/* + * The next two routines implement eeprom read/write from physical addresses. + * The physical->virtual translation is correct only for PF0. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* + * RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + ret = t4_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + return err; +} + +static int set_tso(struct net_device *dev, u32 value) +{ + if (value) + dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + return 0; +} + +static struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_rx_csum = get_rx_csum, + .set_rx_csum = set_rx_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, + .set_sg = ethtool_op_set_sg, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .set_tso = set_tso, + .flash_device = set_flash, +}; + +/* + * debugfs support + */ + +static int mem_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file->f_path.dentry->d_inode->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = t4_mc_read(adap, pos, data, NULL); + else + ret = t4_edc_read(adap, mem, pos, data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = mem_open, + .read = mem_read, +}; + +static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +static int __devinit setup_debugfs(struct adapter *adap) +{ + int i; + + if (IS_ERR_OR_NULL(adap->debugfs_root)) + return -1; + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); + if (i & EDRAM0_ENABLE) + add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE) + add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (adap->l2t) + debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, + &t4_l2t_fops); + return 0; +} + +/* + * 
upper-layer driver support + */ + +/* + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgb4_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + spin_lock_bh(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} +EXPORT_SYMBOL(cxgb4_alloc_atid); + +/* + * Release an active-open TID. + */ +void cxgb4_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); +} +EXPORT_SYMBOL(cxgb4_free_atid); + +/* + * Allocate a server TID and set it to the supplied value. + */ +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_first_zero_bit(t->stid_bmap, t->nstids); + if (stid < t->nstids) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + if (stid < 0) + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid += t->stid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_stid); + +/* + * Release a server TID. + */ +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) +{ + stid -= t->stid_base; + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) + __clear_bit(stid, t->stid_bmap); + else + bitmap_release_region(t->stid_bmap, stid, 2); + t->stid_tab[stid].data = NULL; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} +EXPORT_SYMBOL(cxgb4_free_stid); + +/* + * Populate a TID_RELEASE WR. Caller must properly size the skb. + */ +static void mk_tid_release(struct sk_buff *skb, unsigned int chan, + unsigned int tid) +{ + struct cpl_tid_release *req; + + set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +/* + * Queue a TID release request and if necessary schedule a work queue to + * process it. + */ +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid) +{ + void **p = &t->tid_tab[tid]; + struct adapter *adap = container_of(t, struct adapter, tids); + + spin_lock_bh(&adap->tid_release_lock); + *p = adap->tid_release_head; + /* Low 2 bits encode the Tx channel number */ + adap->tid_release_head = (void **)((uintptr_t)p | chan); + if (!adap->tid_release_task_busy) { + adap->tid_release_task_busy = true; + schedule_work(&adap->tid_release_task); + } + spin_unlock_bh(&adap->tid_release_lock); +} +EXPORT_SYMBOL(cxgb4_queue_tid_release); + +/* + * Process the list of pending TID release requests. 
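+ * Deferred requests are chained through the released tid_tab slots, with
+ * the Tx channel encoded in the low two bits of each pointer; it is
+ * recovered here via "(uintptr_t)p & 3" before the entry is reused.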
+ */ +static void process_tid_release_list(struct work_struct *work) +{ + struct sk_buff *skb; + struct adapter *adap; + + adap = container_of(work, struct adapter, tid_release_task); + + spin_lock_bh(&adap->tid_release_lock); + while (adap->tid_release_head) { + void **p = adap->tid_release_head; + unsigned int chan = (uintptr_t)p & 3; + p = (void *)p - chan; + + adap->tid_release_head = *p; + *p = NULL; + spin_unlock_bh(&adap->tid_release_lock); + + while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL))) + schedule_timeout_uninterruptible(1); + + mk_tid_release(skb, chan, p - adap->tids.tid_tab); + t4_ofld_send(adap, skb); + spin_lock_bh(&adap->tid_release_lock); + } + adap->tid_release_task_busy = false; + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +{ + void *old; + struct sk_buff *skb; + struct adapter *adap = container_of(t, struct adapter, tids); + + old = t->tid_tab[tid]; + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + t->tid_tab[tid] = NULL; + mk_tid_release(skb, chan, tid); + t4_ofld_send(adap, skb); + } else + cxgb4_queue_tid_release(t, chan, tid); + if (old) + atomic_dec(&t->tids_in_use); +} +EXPORT_SYMBOL(cxgb4_remove_tid); + +/* + * Allocate and initialize the TID tables. Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int natids = t->natids; + + size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + + t->nstids * sizeof(*t->stid_tab) + + BITS_TO_LONGS(t->nstids) * sizeof(long); + t->tid_tab = t4_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + t->stids_in_use = 0; + t->afree = NULL; + t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. */ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + bitmap_zero(t->stid_bmap, t->nstids); + return 0; +} + +/** + * cxgb4_create_server - create an IP server + * @dev: the device + * @stid: the server TID + * @sip: local IP address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IP server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. 
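+ * The request is built as a CPL_PASS_OPEN_REQ and handed to t4_mgmt_tx(),
+ * with SYN_RSS_QUEUE set so incoming SYNs are steered to ingress queue
+ * @queue.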
+ */ +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip = sip; + req->peer_ip = htonl(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server); + +/** + * cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +/** + * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU + * @mtus: the HW MTU table + * @mtu: the target MTU + * @idx: index of selected entry in the MTU table + * + * Returns the index and the value in the HW MTU table that is closest to + * but does not exceed @mtu, unless @mtu is smaller than any value in the + * table, in which case that smallest available value is selected. + */ +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx) +{ + unsigned int i = 0; + + while (i < NMTUS - 1 && mtus[i + 1] <= mtu) + ++i; + if (idx) + *idx = i; + return mtus[i]; +} +EXPORT_SYMBOL(cxgb4_best_mtu); + +/** + * cxgb4_port_chan - get the HW channel of a port + * @dev: the net device for the port + * + * Return the HW Tx channel of the given port. + */ +unsigned int cxgb4_port_chan(const struct net_device *dev) +{ + return netdev2pinfo(dev)->tx_chan; +} +EXPORT_SYMBOL(cxgb4_port_chan); + +/** + * cxgb4_port_viid - get the VI id of a port + * @dev: the net device for the port + * + * Return the VI id of the given port. 
+ */ +unsigned int cxgb4_port_viid(const struct net_device *dev) +{ + return netdev2pinfo(dev)->viid; +} +EXPORT_SYMBOL(cxgb4_port_viid); + +/** + * cxgb4_port_idx - get the index of a port + * @dev: the net device for the port + * + * Return the index of the given port. + */ +unsigned int cxgb4_port_idx(const struct net_device *dev) +{ + return netdev2pinfo(dev)->port_id; +} +EXPORT_SYMBOL(cxgb4_port_idx); + +/** + * cxgb4_netdev_by_hwid - return the net device of a HW port + * @pdev: identifies the adapter + * @id: the HW port id + * + * Return the net device associated with the interface with the given HW + * id. + */ +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) +{ + const struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap || id >= NCHAN) + return NULL; + id = adap->chan_map[id]; + return id < MAX_NPORTS ? adap->port[id] : NULL; +} +EXPORT_SYMBOL(cxgb4_netdev_by_hwid); + +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + struct adapter *adap = pci_get_drvdata(pdev); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, v4, v6); + spin_unlock(&adap->stats_lock); +} +EXPORT_SYMBOL(cxgb4_get_tcp_stats); + +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order) +{ + struct adapter *adap = netdev2adap(dev); + + t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); + t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | + HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | + HPZ3(pgsz_order[3])); +} +EXPORT_SYMBOL(cxgb4_iscsi_init); + +static struct pci_driver cxgb4_driver; + +static void check_neigh_update(struct neighbour *neigh) +{ + const struct device *parent; + const struct net_device *netdev = neigh->dev; + + if (netdev->priv_flags & IFF_802_1Q_VLAN) + netdev = vlan_dev_real_dev(netdev); + parent = netdev->dev.parent; + if (parent && parent->driver == &cxgb4_driver.driver) + t4_l2t_update(dev_get_drvdata(parent), neigh); +} + +static int netevent_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + switch (event) { + case NETEVENT_NEIGH_UPDATE: + check_neigh_update(data); + break; + case NETEVENT_PMTU_UPDATE: + case NETEVENT_REDIRECT: + default: + break; + } + return 0; +} + +static bool netevent_registered; +static struct notifier_block cxgb4_netevent_nb = { + .notifier_call = netevent_cb +}; + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + lli.pdev = adap->pdev; + lli.l2t = adap->l2t; + lli.tids = &adap->tids; + lli.ports = adap->port; + lli.vr = &adap->vres; + lli.mtus = adap->params.mtus; + if (uld == CXGB4_ULD_RDMA) { + lli.rxq_ids = adap->sge.rdma_rxq; + lli.nrxq = adap->sge.rdmaqs; + } else if (uld == CXGB4_ULD_ISCSI) { + lli.rxq_ids = adap->sge.ofld_rxq; + lli.nrxq = adap->sge.ofldqsets; + } + lli.ntxq = adap->sge.ofldqsets; + lli.nchan = adap->params.nports; + lli.nports = adap->params.nports; + lli.wr_cred = adap->params.ofldq_wr_cred; + lli.adapter_type = adap->params.rev; + lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); + lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); + lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); + lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); + lli.fw_vers = adap->params.fw_vers; + + handle = ulds[uld].add(&lli); + if (IS_ERR(handle)) { + 
dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + uld_str[uld], PTR_ERR(handle)); + return; + } + + adap->uld_handle[uld] = handle; + + if (!netevent_registered) { + register_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = true; + } +} + +static void attach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (ulds[i].add) + uld_attach(adap, i); + mutex_unlock(&uld_mutex); +} + +static void detach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) { + ulds[i].state_change(adap->uld_handle[i], + CXGB4_STATE_DETACH); + adap->uld_handle[i] = NULL; + } + if (netevent_registered && list_empty(&adapter_list)) { + unregister_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = false; + } + mutex_unlock(&uld_mutex); +} + +static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) + ulds[i].state_change(adap->uld_handle[i], new_state); + mutex_unlock(&uld_mutex); +} + +/** + * cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods + * + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. + */ +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + if (ulds[type].add) { + ret = -EBUSY; + goto out; + } + ulds[type] = *p; + list_for_each_entry(adap, &adapter_list, list_node) + uld_attach(adap, type); +out: mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_uld); + +/** + * cxgb4_unregister_uld - unregister an upper-layer driver + * @type: the ULD type + * + * Unregisters an existing upper-layer driver. + */ +int cxgb4_unregister_uld(enum cxgb4_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) + adap->uld_handle[type] = NULL; + ulds[type].add = NULL; + mutex_unlock(&uld_mutex); + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_uld); + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. 
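+ *
+ * The SGE queues and RSS table are set up only on the first call (gated
+ * by FULL_INIT_DONE); subsequent calls just re-request interrupts and
+ * re-enable Rx processing.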
+ */ +static int cxgb_up(struct adapter *adap) +{ + int err = 0; + + if (!(adap->flags & FULL_INIT_DONE)) { + err = setup_sge_queues(adap); + if (err) + goto out; + err = setup_rss(adap); + if (err) { + t4_free_sge_resources(adap); + goto out; + } + if (adap->flags & USING_MSIX) + name_msix_vecs(adap); + adap->flags |= FULL_INIT_DONE; + } + + if (adap->flags & USING_MSIX) { + err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_queue_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else { + err = request_irq(adap->pdev->irq, t4_intr_handler(adap), + (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, + adap->name, adap); + if (err) + goto irq_err; + } + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + notify_ulds(adap, CXGB4_STATE_UP); + out: + return err; + irq_err: + dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); + goto out; +} + +static void cxgb_down(struct adapter *adapter) +{ + t4_intr_disable(adapter); + cancel_work_sync(&adapter->tid_release_task); + adapter->tid_release_task_busy = false; + + if (adapter->flags & USING_MSIX) { + free_msix_queue_irqs(adapter); + free_irq(adapter->msix_info[0].vec, adapter); + } else + free_irq(adapter->pdev->irq, adapter); + quiesce_rx(adapter); +} + +/* + * net_device operations + */ +static int cxgb_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + return err; + + dev->real_num_tx_queues = pi->nqsets; + set_bit(pi->tx_chan, &adapter->open_device_map); + link_start(dev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + ret = t4_enable_vi(adapter, 0, pi->viid, false, false); + + clear_bit(pi->tx_chan, &adapter->open_device_map); + + if (!adapter->open_device_map) + cxgb_down(adapter); + return 0; +} + +static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +{ + struct port_stats stats; + struct port_info *p = netdev_priv(dev); + struct adapter *adapter = p->adapter; + struct net_device_stats *ns = &dev->stats; + + spin_lock(&adapter->stats_lock); + t4_get_port_stats(adapter, p->tx_chan, &stats); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = stats.tx_octets; + ns->tx_packets = stats.tx_frames; + ns->rx_bytes = stats.rx_octets; + ns->rx_packets = stats.rx_frames; + ns->multicast = stats.rx_mcast_frames; + + /* detailed rx_errors */ + ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + + stats.rx_runt; + ns->rx_over_errors = 0; + ns->rx_crc_errors = stats.rx_fcs_err; + ns->rx_frame_errors = stats.rx_symbol_err; + ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + stats.rx_ovflow2 + stats.rx_ovflow3 + + stats.rx_trunc0 + stats.rx_trunc1 + + stats.rx_trunc2 + stats.rx_trunc3; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = 0; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; + return ns; +} + +static int cxgb_ioctl(struct net_device *dev, 
struct ifreq *req, int cmd) +{ + int ret = 0, prtad, devad; + struct port_info *pi = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; + + switch (cmd) { + case SIOCGMIIPHY: + if (pi->mdio_addr < 0) + return -EOPNOTSUPP; + data->phy_id = pi->mdio_addr; + break; + case SIOCGMIIREG: + case SIOCSMIIREG: + if (mdio_phy_id_is_c45(data->phy_id)) { + prtad = mdio_phy_id_prtad(data->phy_id); + devad = mdio_phy_id_devad(data->phy_id); + } else if (data->phy_id < 32) { + prtad = data->phy_id; + devad = 0; + data->reg_num &= 0x1f; + } else + return -EINVAL; + + if (cmd == SIOCGMIIREG) + ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, + data->reg_num, &data->val_out); + else + ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, + data->reg_num, data->val_in); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ + return -EINVAL; + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, + true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, + addr->sa_data, true, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct port_info *pi = netdev_priv(dev); + + pi->vlan_grp = grp; + t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & USING_MSIX) { + int i; + struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; + + for (i = pi->nqsets; i; i--, rx++) + t4_sge_intr_msix(0, &rx->rspq); + } else + t4_intr_handler(adap)(0, adap); +} +#endif + +static const struct net_device_ops cxgb4_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t4_eth_xmit, + .ndo_get_stats = cxgb_get_stats, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, + .ndo_vlan_rx_register = vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +void t4_fatal_err(struct adapter *adap) +{ + t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); + t4_intr_disable(adap); + dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 bar0; + + bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), + (bar0 + MEMWIN0_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), + (bar0 + MEMWIN1_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); + 
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), + (bar0 + MEMWIN2_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); +} + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + */ +static int adap_init0(struct adapter *adap) +{ + int ret; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + struct fw_caps_config_cmd c; + + ret = t4_check_fw_version(adap); + if (ret == -EINVAL || ret > 0) { + if (upgrade_fw(adap) >= 0) /* recache FW version */ + ret = t4_check_fw_version(adap); + } + if (ret < 0) + return ret; + + /* contact FW, request master */ + ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); + if (ret < 0) { + dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", + ret); + return ret; + } + + /* reset device */ + ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + + /* get device capabilities */ + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c.retval_len16 = htonl(FW_LEN16(c)); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret < 0) + goto bye; + + /* select capabilities we'll be using */ + if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported"); + goto bye; + } + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); + if (ret < 0) + goto bye; + + ret = t4_config_glbl_rss(adap, 0, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + if (ret < 0) + goto bye; + + ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, + FW_CMD_CAP_PF, FW_CMD_CAP_PF); + if (ret < 0) + goto bye; + + for (v = 0; v < SGE_NTIMERS - 1; v++) + adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); + adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + adap->sge.counter_val[0] = 1; + for (v = 1; v < SGE_NCOUNTERS; v++) + adap->sge.counter_val[v] = min(intr_cnt[v - 1], + THRESHOLD_3_MASK); + t4_sge_init(adap); + + /* get basic stuff going */ + ret = t4_early_init(adap, 0); + if (ret < 0) + goto bye; + +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) + + params[0] = FW_PARAM_DEV(PORTVEC); + params[1] = FW_PARAM_PFVF(L2T_START); + params[2] = FW_PARAM_PFVF(L2T_END); + params[3] = FW_PARAM_PFVF(FILTER_START); + params[4] = FW_PARAM_PFVF(FILTER_END); + ret = t4_query_params(adap, 0, 0, 0, 5, params, val); + if (ret < 0) + goto bye; + port_vec = val[0]; + adap->tids.ftid_base = val[3]; + adap->tids.nftids = val[4] - val[3] + 1; + + if (c.ofldcaps) { + /* query offload-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + params[1] = FW_PARAM_PFVF(SERVER_START); + params[2] = FW_PARAM_PFVF(SERVER_END); + params[3] = FW_PARAM_PFVF(TDDP_START); + params[4] = FW_PARAM_PFVF(TDDP_END); + params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = 
min(adap->tids.ntids / 2, MAX_ATIDS); + adap->tids.stid_base = val[1]; + adap->tids.nstids = val[2] - val[1] + 1; + adap->vres.ddp.start = val[3]; + adap->vres.ddp.size = val[4] - val[3] + 1; + adap->params.ofldq_wr_cred = val[5]; + adap->params.offload = 1; + } + if (c.rdmacaps) { + params[0] = FW_PARAM_PFVF(STAG_START); + params[1] = FW_PARAM_PFVF(STAG_END); + params[2] = FW_PARAM_PFVF(RQ_START); + params[3] = FW_PARAM_PFVF(RQ_END); + params[4] = FW_PARAM_PFVF(PBL_START); + params[5] = FW_PARAM_PFVF(PBL_END); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->vres.stag.start = val[0]; + adap->vres.stag.size = val[1] - val[0] + 1; + adap->vres.rq.start = val[2]; + adap->vres.rq.size = val[3] - val[2] + 1; + adap->vres.pbl.start = val[4]; + adap->vres.pbl.size = val[5] - val[4] + 1; + } + if (c.iscsicaps) { + params[0] = FW_PARAM_PFVF(ISCSI_START); + params[1] = FW_PARAM_PFVF(ISCSI_END); + ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->vres.iscsi.start = val[0]; + adap->vres.iscsi.size = val[1] - val[0] + 1; + } +#undef FW_PARAM_PFVF +#undef FW_PARAM_DEV + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + adap->flags |= FW_OK; + + /* These are finalized by FW initialization, load their values now */ + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); + v = t4_read_reg(adap, TP_PIO_DATA); + t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + setup_memwin(adap); + return 0; + + /* + * If a command timed out or failed with EIO FW does not operate within + * its spec or something catastrophic happened to HW/FW, stop issuing + * commands. + */ +bye: if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, 0); + return ret; +} + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; +} + +static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, + unsigned int size, unsigned int iqe_size) +{ + q->intr_params = QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); + q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; + q->iqe_len = iqe_size; + q->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and type + * of ports we found and the number of available CPUs. Most settings can be + * modified by the admin prior to actual use. + */ +static void __devinit cfg_queues(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int i, q10g = 0, n10g = 0, qidx = 0; + + for_each_port(adap, i) + n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = is_10g_port(&pi->link_cfg) ? 
q10g : 1; + qidx += pi->nqsets; + } + + s->ethqsets = qidx; + s->max_ethqsets = qidx; /* MSI-X may lower it later */ + + if (is_offload(adap)) { + /* + * For offload we use 1 queue/channel if all ports are up to 1G, + * otherwise we divide all available queues amongst the channels + * capped by the number of available cores. + */ + if (n10g) { + i = min_t(int, ARRAY_SIZE(s->ofldrxq), + num_online_cpus()); + s->ofldqsets = roundup(i, adap->params.nports); + } else + s->ofldqsets = adap->params.nports; + /* For RDMA one Rx queue per channel suffices */ + s->rdmaqs = adap->params.nports; + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) + s->ctrlq[i].q.size = 512; + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) + s->ofldtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { + struct sge_ofld_rxq *r = &s->ofldrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->rspq.uld = CXGB4_ULD_ISCSI; + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { + struct sge_ofld_rxq *r = &s->rdmarxq[i]; + + init_rspq(&r->rspq, 0, 0, 511, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + r->fl.size = 72; + } + + init_rspq(&s->fw_evtq, 6, 0, 512, 64); + init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. + */ +static void __devinit reduce_ethqs(struct adapter *adap, int n) +{ + int i; + struct port_info *pi; + + while (n < adap->sge.ethqsets) + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adap->sge.ethqsets--; + if (adap->sge.ethqsets <= n) + break; + } + } + + n = 0; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ +#define EXTRA_VECS 2 + +static int __devinit enable_msix(struct adapter *adap) +{ + int ofld_need = 0; + int i, err, want, need; + struct sge *s = &adap->sge; + unsigned int nchan = adap->params.nports; + struct msix_entry entries[MAX_INGQ + 1]; + + for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries[i].entry = i; + + want = s->max_ethqsets + EXTRA_VECS; + if (is_offload(adap)) { + want += s->rdmaqs + s->ofldqsets; + /* need nchan for each possible ULD */ + ofld_need = 2 * nchan; + } + need = adap->params.nports + EXTRA_VECS + ofld_need; + + while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) + want = err; + + if (!err) { + /* + * Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. 
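+ *
+ * A worked example with illustrative numbers (not taken from this
+ * patch): two 10G ports with offload enabled, max_ethqsets = 8,
+ * rdmaqs = 2, ofldqsets = 4, so want = 8 + 2 + 2 + 4 = 16, and with
+ * nchan = 2 we get ofld_need = 4 and need = 2 + 2 + 4 = 8.  If only
+ * 12 vectors are granted, the NIC share becomes 12 - 2 - 4 = 6 (so
+ * max_ethqsets drops to 6 and reduce_ethqs() trims the ports), RDMA
+ * keeps its 2, and the remaining 12 - 2 - 6 - 2 = 2 vectors go to the
+ * offload queues, rounded down to a multiple of nchan.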
+ */ + i = want - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + i = want - EXTRA_VECS - s->max_ethqsets; + i -= ofld_need - nchan; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < want; ++i) + adap->msix_info[i].vec = entries[i].vector; + } else if (err > 0) + dev_info(adap->pdev_dev, + "only %d MSI-X vectors left, not using MSI-X\n", err); + return err; +} + +#undef EXTRA_VECS + +static void __devinit print_port_info(struct adapter *adap) +{ + static const char *base[] = { + "R", "KX4", "T", "KX", "T", "KR", "CX4" + }; + + int i; + char buf[80]; + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + const struct port_info *pi = netdev_priv(dev); + char *bufp = buf; + + if (!test_bit(i, &adap->registered_device_map)) + continue; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", base[pi->port_type]); + + netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", + adap->params.vpd.id, adap->params.rev, + buf, is_offload(adap) ? "R" : "", + adap->params.pci.width, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); + if (adap->name == dev->name) + netdev_info(dev, "S/N: %s, E/C: %s\n", + adap->params.vpd.sn, adap->params.vpd.ec); + } +} + +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int func, i, err; + struct port_info *pi; + unsigned int highdma = 0; + struct adapter *adapter = NULL; + + printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + /* Just info, some other driver may have claimed the device. 
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + /* We control everything through PF 0 */ + func = PCI_FUNC(pdev->devfn); + if (func > 0) + goto sriov; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out_release_regions; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + highdma = NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_disable_device; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_disable_device; + } + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + pci_save_state(pdev); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_disable_device; + } + + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); + + spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->tid_release_lock); + + INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + + err = t4_prep_adapter(adapter); + if (err) + goto out_unmap_bar; + err = adap_init0(adapter); + if (err) + goto out_unmap_bar; + + for_each_port(adapter, i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->rx_offload = RX_CSO; + pi->port_id = i; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev->irq = pdev->irq; + + netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_GRO | highdma; + netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->vlan_features = netdev->features & VLAN_FEAT; + + netdev->netdev_ops = &cxgb4_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); + } + + pci_set_drvdata(pdev, adapter); + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, 0, 0, 0); + if (err) + goto out_free_dev; + } + + /* + * Configure queues and allocate tables now, they can be needed as + * soon as the first register_netdev completes. + */ + cfg_queues(adapter); + + adapter->l2t = t4_init_l2t(); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); + adapter->params.offload = 0; + } + + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { + dev_warn(&pdev->dev, "could not allocate TID table, " + "continuing\n"); + adapter->params.offload = 0; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. 
+ */ + for_each_port(adapter, i) { + err = register_netdev(adapter->port[i]); + if (err) + dev_warn(&pdev->dev, + "cannot register net device %s, skipping\n", + adapter->port[i]->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. + */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i]->name; + + __set_bit(i, &adapter->registered_device_map); + adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; + } + } + if (!adapter->registered_device_map) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + + if (cxgb4_debugfs_root) { + adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), + cxgb4_debugfs_root); + setup_debugfs(adapter); + } + + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + if (is_offload(adapter)) + attach_ulds(adapter); + + print_port_info(adapter); + +sriov: +#ifdef CONFIG_PCI_IOV + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (pci_enable_sriov(pdev, num_vf[func]) == 0) + dev_info(&pdev->dev, + "instantiated %u virtual functions\n", + num_vf[func]); +#endif + return 0; + + out_free_dev: + t4_free_mem(adapter->tids.tid_tab); + t4_free_mem(adapter->l2t); + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + out_unmap_bar: + iounmap(adapter->regs); + out_free_adapter: + kfree(adapter); + out_disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + out_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + pci_disable_sriov(pdev); + + if (adapter) { + int i; + + if (is_offload(adapter)) + detach_ulds(adapter); + + for_each_port(adapter, i) + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i]); + + if (adapter->debugfs_root) + debugfs_remove_recursive(adapter->debugfs_root); + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + } else if (PCI_FUNC(pdev->devfn) > 0) + pci_release_regions(pdev); +} + +static struct pci_driver cxgb4_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), +}; + +static int __init cxgb4_init_module(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4_debugfs_root) + pr_warning("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4_driver); + if (ret < 0) + debugfs_remove(cxgb4_debugfs_root); + return ret; +} + +static void __exit cxgb4_cleanup_module(void) +{ + pci_unregister_driver(&cxgb4_driver); + debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ +} + +module_init(cxgb4_init_module); +module_exit(cxgb4_cleanup_module); diff --git 
a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h new file mode 100644 index 00000000000..5b98546ac92 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_uld.h @@ -0,0 +1,239 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_OFLD_H +#define __CXGB4_OFLD_H + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <asm/atomic.h> + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* control messages */ +}; + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ + FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* Special asynchronous notification message */ +#define CXGB4_MSG_AN ((void *)1) + +struct serv_entry { + void *data; +}; + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables. The tables themselves are allocated dynamically. 
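+ *
+ * Ordinary TIDs index tid_tab directly, active-open TIDs index atid_tab,
+ * and server TIDs are offset by stid_base before indexing stid_tab; see
+ * the lookup_tid(), lookup_atid() and lookup_stid() helpers below.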
+ */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + + struct serv_entry *stid_tab; + unsigned long *stid_bmap; + unsigned int nstids; + unsigned int stid_base; + + union aopen_entry *atid_tab; + unsigned int natids; + + unsigned int nftids; + unsigned int ftid_base; + + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union aopen_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock; + unsigned int stids_in_use; + + atomic_t tids_in_use; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) +{ + stid -= t->stid_base; + return stid < t->nstids ? t->stid_tab[stid].data : NULL; +} + +static inline void cxgb4_insert_tid(struct tid_info *t, void *data, + unsigned int tid) +{ + t->tid_tab[tid] = data; + atomic_inc(&t->tids_in_use); +} + +int cxgb4_alloc_atid(struct tid_info *t, void *data); +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); +void cxgb4_free_atid(struct tid_info *t, unsigned int atid); +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); +void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid); + +struct in6_addr; + +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue); +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue); + +static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) +{ + skb_set_queue_mapping(skb, (queue << 1) | prio); +} + +enum cxgb4_uld { + CXGB4_ULD_RDMA, + CXGB4_ULD_ISCSI, + CXGB4_ULD_MAX +}; + +enum cxgb4_state { + CXGB4_STATE_UP, + CXGB4_STATE_START_RECOVERY, + CXGB4_STATE_DOWN, + CXGB4_STATE_DETACH +}; + +struct pci_dev; +struct l2t_data; +struct net_device; +struct pkt_gl; +struct tp_tcp_stats; + +struct cxgb4_range { + unsigned int start; + unsigned int size; +}; + +struct cxgb4_virt_res { /* virtualized HW resources */ + struct cxgb4_range ddp; + struct cxgb4_range iscsi; + struct cxgb4_range stag; + struct cxgb4_range rq; + struct cxgb4_range pbl; +}; + +/* + * Block of information the LLD provides to ULDs attaching to a device. 
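+ *
+ * A minimal consumer of this interface (an illustrative sketch only, using
+ * made-up demo_* names that are not part of this patch) fills in a
+ * struct cxgb4_uld_info and registers it for one of the ULD types, e.g.:
+ *
+ *	static struct cxgb4_lld_info demo_lldi;
+ *
+ *	static void *demo_add(const struct cxgb4_lld_info *lli)
+ *	{
+ *		demo_lldi = *lli;	// keep lli->ports, lli->tids, etc.
+ *		return &demo_lldi;	// any non-NULL handle
+ *	}
+ *
+ *	static int demo_rx(void *handle, const __be64 *rsp,
+ *			   const struct pkt_gl *gl)
+ *	{
+ *		return 0;		// message consumed
+ *	}
+ *
+ *	static int demo_state_change(void *handle, enum cxgb4_state state)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static const struct cxgb4_uld_info demo_uld_info = {
+ *		.name		= "demo",
+ *		.add		= demo_add,
+ *		.rx_handler	= demo_rx,
+ *		.state_change	= demo_state_change,
+ *	};
+ *
+ *	ret = cxgb4_register_uld(CXGB4_ULD_ISCSI, &demo_uld_info);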
+ */ +struct cxgb4_lld_info { + struct pci_dev *pdev; /* associated PCI device */ + struct l2t_data *l2t; /* L2 table */ + struct tid_info *tids; /* TID table */ + struct net_device **ports; /* device ports */ + const struct cxgb4_virt_res *vr; /* assorted HW resources */ + const unsigned short *mtus; /* MTU table */ + const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ + unsigned short nrxq; /* # of Rx queues */ + unsigned short ntxq; /* # of Tx queues */ + unsigned char nchan:4; /* # of channels */ + unsigned char nports:4; /* # of ports */ + unsigned char wr_cred; /* WR 16-byte credits */ + unsigned char adapter_type; /* type of adapter */ + unsigned char fw_api_ver; /* FW API version */ + unsigned int fw_vers; /* FW version */ + unsigned int iscsi_iolen; /* iSCSI max I/O length */ + unsigned short udb_density; /* # of user DB/page */ + unsigned short ucq_density; /* # of user CQs/page */ + void __iomem *gts_reg; /* address of GTS register */ + void __iomem *db_reg; /* address of kernel doorbell */ +}; + +struct cxgb4_uld_info { + const char *name; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); +}; + +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +unsigned int cxgb4_port_chan(const struct net_device *dev); +unsigned int cxgb4_port_viid(const struct net_device *dev); +unsigned int cxgb4_port_idx(const struct net_device *dev); +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id); +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx); +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order); +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len); +#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c new file mode 100644 index 00000000000..9f96724a133 --- /dev/null +++ b/drivers/net/cxgb4/l2t.c @@ -0,0 +1,624 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if.h> +#include <linux/if_vlan.h> +#include <linux/jhash.h> +#include <net/neighbour.h> +#include "cxgb4.h" +#include "l2t.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +#define VLAN_NONE 0xfff + +/* identifies sync vs async L2T_WRITE_REQs */ +#define F_SYNC_WR (1 << 12) + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct l2t_data { + rwlock_t lock; + atomic_t nfree; /* number of free entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + struct l2t_entry l2tab[L2T_SIZE]; +}; + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +/* + * To avoid having to check address families we do not allow v4 and v6 + * neighbors to be on the same hash chain. We keep v4 entries in the first + * half of available hash buckets and v6 in the second. + */ +enum { + L2T_SZ_HALF = L2T_SIZE / 2, + L2T_HASH_MASK = L2T_SZ_HALF - 1 +}; + +static inline unsigned int arp_hash(const u32 *key, int ifindex) +{ + return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; +} + +static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +{ + u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; + + return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); +} + +static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +{ + return addr_len == 4 ? arp_hash(addr, ifindex) : + ipv6_hash(addr, ifindex); +} + +/* + * Checks if an L2T entry is for the given IP/IPv6 address. It does not check + * whether the L2T entry and the address are of the same address family. + * Callers ensure an address is only checked against L2T entries of the same + * family, something made trivial by the separation of IP and IPv6 hash chains + * mentioned above. Returns 0 if there's a match, + */ +static int addreq(const struct l2t_entry *e, const u32 *addr) +{ + if (e->v6) + return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | + (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); + return e->addr[0] ^ addr[0]; +} + +static void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous. 
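+ *
+ * A synchronous write tags the request with F_SYNC_WR and asks HW for a
+ * completion: the entry sits in L2T_STATE_SYNC_WRITE until the matching
+ * CPL_L2T_WRITE_RPL arrives (see do_l2t_write_rpl() below), at which point
+ * any packets parked on the entry's ARP queue are sent.  An asynchronous
+ * write sets L2T_W_NOREPLY and expects no completion.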
+ */ +static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) +{ + struct sk_buff *skb; + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + e->idx | (sync ? F_SYNC_WR : 0) | + TID_QID(adap->sge.fw_evtq.abs_id))); + req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); + req->l2t_idx = htons(e->idx); + req->vlan = htons(e->vlan); + if (e->neigh) + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + t4_ofld_send(adap, skb); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + return 0; +} + +/* + * Send packets waiting in an L2T entry's ARP queue. Must be called with the + * entry locked. + */ +static void send_pending(struct adapter *adap, struct l2t_entry *e) +{ + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + skb->next = NULL; + t4_ofld_send(adap, skb); + } + e->arpq_tail = NULL; +} + +/* + * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a + * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T + * index it refers to. + */ +void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) +{ + unsigned int tid = GET_TID(rpl); + unsigned int idx = tid & (L2T_SIZE - 1); + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap->pdev_dev, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &adap->l2t->l2tab[idx]; + + spin_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) { + send_pending(adap, e); + e->state = (e->neigh->nud_state & NUD_STALE) ? + L2T_STATE_STALE : L2T_STATE_VALID; + } + spin_unlock(&e->lock); + } +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution. + * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + skb->next = NULL; + if (e->arpq_head) + e->arpq_tail->next = skb; + else + e->arpq_head = skb; + e->arpq_tail = skb; +} + +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct adapter *adap = netdev2adap(dev); + +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return t4_ofld_send(adap, skb); + case L2T_STATE_RESOLVING: + case L2T_STATE_SYNC_WRITE: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_SYNC_WRITE && + e->state != L2T_STATE_RESOLVING) { + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + if (e->state == L2T_STATE_RESOLVING && + !neigh_event_send(e->neigh, NULL)) { + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + write_l2e(adap, e, 1); + spin_unlock_bh(&e->lock); + } + } + return 0; +} +EXPORT_SYMBOL(cxgb4_l2t_send); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. 
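+ *
+ * The search starts at the rover and wraps around, looking for an entry
+ * with a zero reference count; if the entry picked is still on a hash
+ * chain it is unlinked first, so a recycled entry is never visible to
+ * address lookups.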
+ */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = d->l2tab; atomic_read(&e->refcnt); ++e) + ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + + e->state = L2T_STATE_UNUSED; + return e; +} + +/* + * Called when an L2T entry has no more users. + */ +static void t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + } + spin_unlock_bh(&e->lock); + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +void cxgb4_l2t_release(struct l2t_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_l2e_free(e); +} +EXPORT_SYMBOL(cxgb4_l2t_release); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. + */ +static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority) +{ + u8 lport; + u16 vlan; + struct l2t_entry *e; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *)neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + if (neigh->dev->flags & IFF_LOOPBACK) + lport = netdev2pinfo(physdev)->tx_chan + 4; + else + lport = netdev2pinfo(physdev)->lport; + + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + vlan = vlan_dev_vlan_id(neigh->dev); + else + vlan = VLAN_NONE; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx && + e->vlan == vlan && e->lport == lport) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_RESOLVING; + memcpy(e->addr, addr, addr_len); + e->ifindex = ifidx; + e->hash = hash; + e->lport = lport; + e->v6 = addr_len == 16; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + e->vlan = vlan; + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} +EXPORT_SYMBOL(cxgb4_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. 
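+ *
+ * The failure handler is the per-packet arp_err_handler stored in the
+ * skb's control block via t4_set_arp_err_handler() (see l2t.h).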
+ */ +static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +{ + while (arpq) { + struct sk_buff *skb = arpq; + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + arpq = skb->next; + skb->next = NULL; + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + } +} + +/* + * Called when the host's neighbor layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) +{ + struct l2t_entry *e; + struct sk_buff *arpq = NULL; + struct l2t_data *d = adap->l2t; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx) { + spin_lock(&e->lock); + if (atomic_read(&e->refcnt)) + goto found; + spin_unlock(&e->lock); + break; + } + read_unlock_bh(&d->lock); + return; + + found: + read_unlock(&d->lock); + + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + arpq = e->arpq_head; + e->arpq_head = e->arpq_tail = NULL; + } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && + e->arpq_head) { + write_l2e(adap, e, 1); + } + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? + L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) + write_l2e(adap, e, 0); + } + + spin_unlock_bh(&e->lock); + + if (arpq) + handle_failed_resolution(adap, arpq); +} + +/* + * Allocate an L2T entry for use by a switching rule. Such entries need to be + * explicitly freed and while busy they are not on any hash chain, so normal + * address resolution updates do not see them. + */ +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +{ + struct l2t_entry *e; + + write_lock_bh(&d->lock); + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_SWITCHING; + atomic_set(&e->refcnt, 1); + spin_unlock(&e->lock); + } + write_unlock_bh(&d->lock); + return e; +} + +/* + * Sets/updates the contents of a switching L2T entry that has been allocated + * with an earlier call to @t4_l2t_alloc_switching. + */ +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr) +{ + e->vlan = vlan; + e->lport = port; + memcpy(e->dmac, eth_addr, ETH_ALEN); + return write_l2e(adap, e, 0); +} + +struct l2t_data *t4_init_l2t(void) +{ + int i; + struct l2t_data *d; + + d = t4_alloc_mem(sizeof(*d)); + if (!d) + return NULL; + + d->rover = d->l2tab; + atomic_set(&d->nfree, L2T_SIZE); + rwlock_init(&d->lock); + + for (i = 0; i < L2T_SIZE; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +{ + struct l2t_entry *l2tab = seq->private; + + return pos >= L2T_SIZE ? NULL : &l2tab[pos]; +} + +static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; +} + +static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = l2t_get_idx(seq, *pos); + if (v) + ++*pos; + return v; +} + +static void l2t_seq_stop(struct seq_file *seq, void *v) +{ +} + +static char l2e_state(const struct l2t_entry *e) +{ + switch (e->state) { + case L2T_STATE_VALID: return 'V'; + case L2T_STATE_STALE: return 'S'; + case L2T_STATE_SYNC_WRITE: return 'W'; + case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_SWITCHING: return 'X'; + default: + return 'U'; + } +} + +static int l2t_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Idx IP address " + "Ethernet address VLAN/P LP State Users Port\n"); + else { + char ip[60]; + struct l2t_entry *e = v; + + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_SWITCHING) + ip[0] = '\0'; + else + sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); + seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", + e->idx, ip, e->dmac, + e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, + l2e_state(e), atomic_read(&e->refcnt), + e->neigh ? e->neigh->dev->name : ""); + spin_unlock_bh(&e->lock); + } + return 0; +} + +static const struct seq_operations l2t_seq_ops = { + .start = l2t_seq_start, + .next = l2t_seq_next, + .stop = l2t_seq_stop, + .show = l2t_seq_show +}; + +static int l2t_seq_open(struct inode *inode, struct file *file) +{ + int rc = seq_open(file, &l2t_seq_ops); + + if (!rc) { + struct adapter *adap = inode->i_private; + struct seq_file *seq = file->private_data; + + seq->private = adap->l2t->l2tab; + } + return rc; +} + +const struct file_operations t4_l2t_fops = { + .owner = THIS_MODULE, + .open = l2t_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h new file mode 100644 index 00000000000..643f27ed3cf --- /dev/null +++ b/drivers/net/cxgb4/l2t.h @@ -0,0 +1,110 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_L2T_H +#define __CXGB4_L2T_H + +#include <linux/spinlock.h> +#include <linux/if_ether.h> +#include <asm/atomic.h> + +struct adapter; +struct l2t_data; +struct neighbour; +struct net_device; +struct file_operations; +struct cpl_l2t_write_rpl; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index */ + u32 addr[4]; /* next hop IP or IPv6 address */ + int ifindex; /* neighbor's net_device's ifindex */ + struct neighbour *neigh; /* associated neighbour */ + struct l2t_entry *first; /* start of hash chain */ + struct l2t_entry *next; /* next l2t_entry on chain */ + struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ + struct sk_buff *arpq_tail; + spinlock_t lock; + atomic_t refcnt; /* entry reference count */ + u16 hash; /* hash bucket the entry is on */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ + u8 v6; /* whether entry is for IPv6 */ + u8 lport; /* associated offload logical interface */ + u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ +}; + +typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); + +/* + * Callback stored in an skb to handle address resolution failure. + */ +struct l2t_skb_cb { + void *handle; + arp_err_handler_t arp_err_handler; +}; + +#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, + arp_err_handler_t handler) +{ + L2T_SKB_CB(skb)->handle = handle; + L2T_SKB_CB(skb)->arp_err_handler = handler; +} + +void cxgb4_l2t_release(struct l2t_entry *e); +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e); +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority); + +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr); +struct l2t_data *t4_init_l2t(void); +void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); + +extern const struct file_operations t4_l2t_fops; +#endif /* __CXGB4_L2T_H */ diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c new file mode 100644 index 00000000000..14adc58e71c --- /dev/null +++ b/drivers/net/cxgb4/sge.c @@ -0,0 +1,2431 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> +#include <linux/jiffies.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +/* + * Rx buffer size. We use largish buffers if possible but settle for single + * pages under memory shortage. + */ +#if PAGE_SHIFT >= 16 +# define FL_PG_ORDER 0 +#else +# define FL_PG_ORDER (16 - PAGE_SHIFT) +#endif + +/* RX_PULL_LEN should be <= RX_COPY_THRES */ +#define RX_COPY_THRES 256 +#define RX_PULL_LEN 128 + +/* + * Main body length for sk_buffs used for Rx Ethernet packets with fragments. + * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. + */ +#define RX_PKT_SKB_LEN 512 + +/* Ethernet header padding prepended to RX_PKTs */ +#define RX_PKT_PAD 2 + +/* + * Max number of Tx descriptors we clean up at a time. Should be modest as + * freeing skbs isn't cheap and it happens while holding locks. We just need + * to free packets faster than they arrive, we eventually catch up and keep + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + */ +#define MAX_TX_RECLAIM 16 + +/* + * Max number of Rx buffers we replenish at a time. Again keep this modest, + * allocating buffers isn't cheap either. + */ +#define MAX_RX_REFILL 16U + +/* + * Period of the Rx queue check timer. This timer is infrequent as it has + * something to do only when the system experiences severe memory shortage. + */ +#define RX_QCHECK_PERIOD (HZ / 2) + +/* + * Period of the Tx queue check timer. + */ +#define TX_QCHECK_PERIOD (HZ / 2) + +/* + * Max number of Tx descriptors to be reclaimed by the Tx timer. + */ +#define MAX_TIMER_TX_RECLAIM 100 + +/* + * Timer index used when backing off due to memory shortage. + */ +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will + * attempt to refill it. + */ +#define FL_STARVE_THRES 4 + +/* + * Suspend an Ethernet Tx queue with fewer available descriptors than this. + * This is the same as calc_tx_descs() for a TSO packet with + * nr_frags == MAX_SKB_FRAGS. 
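+ *
+ * For example, assuming MAX_SKB_FRAGS is 18 (its usual value with 4KB
+ * pages), the expression below evaluates to
+ * 1 + DIV_ROUND_UP(3 * 18 / 2 + 0, 8) = 1 + DIV_ROUND_UP(27, 8) = 5
+ * descriptors.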
+ */ +#define ETHTXQ_STOP_THRES \ + (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) + +/* + * Suspension threshold for non-Ethernet Tx queues. We require enough room + * for a full sized WR. + */ +#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 128 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +enum { + /* packet alignment in FL buffers */ + FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, + /* egress status entry size */ + STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + struct ulptx_sgl *sgl; +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + struct page *page; + dma_addr_t dma_addr; +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) +{ + return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +static inline bool is_buf_mapped(const struct rx_sw_desc *d) +{ + return !(d->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. 
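+ *
+ * Each FL descriptor covers 8 buffers, so e.g. a free list created with
+ * size 72 (the default set up by cfg_queues()) can hold at most
+ * 72 - 8 = 64 buffers.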
+ */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); +out_err: + return -ENOMEM; +} + +#ifdef CONFIG_NEED_DMA_MAP_STATE +static void unmap_skb(struct device *dev, const struct sk_buff *skb, + const dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) + dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. + */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); +} +#endif + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *q) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { +unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)q->stat) { + p = (const struct ulptx_sge_pair *)q->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)q->stat) { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)q->stat) + p = (const struct ulptx_sge_pair *)q->desc; + addr = (u8 *)p + 16 <= (u8 *)q->stat ? 
p->addr[0] : + *(const __be64 *)q->desc; + dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + struct device *dev = adap->pdev_dev; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (unmap) + unmap_sgl(dev, d->skb, d->sgl, q); + kfree_skb(d->skb); + d->skb = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + hw_cidx -= q->cidx; + return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + int avail = reclaimable(q); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the Tx lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adap, q, avail, unmap); + q->in_use -= avail; + } +} + +static inline int get_buf_size(const struct rx_sw_desc *d) +{ +#if FL_PG_ORDER > 0 + return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : + PAGE_SIZE; +#else + return PAGE_SIZE; +#endif +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @adap: the adapter + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) +{ + while (n--) { + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + put_page(d->page); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @adap: the adapter + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. 
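+ *
+ * (Typically used on the Rx path, where the page is handed off to the
+ * packet gather list / sk_buff and must no longer be recycled by the FL.)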
+ */ +static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) +{ + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 8) { + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | + QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, + dma_addr_t mapping) +{ + sd->page = pg; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl - refill an SGE Rx buffer ring + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, + gfp_t gfp) +{ + struct page *pg; + dma_addr_t mapping; + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + + gfp |= __GFP_NOWARN; /* failures are expected */ + +#if FL_PG_ORDER > 0 + /* + * Prefer large buffers + */ + while (n) { + pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); + if (unlikely(!pg)) { + q->large_alloc_failed++; + break; /* fall back to single pages */ + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + __free_pages(pg, FL_PG_ORDER); + goto out; /* do not try small pages for this error */ + } + mapping |= RX_LARGE_BUF; + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + n--; + } +#endif + + while (n--) { + pg = __netdev_alloc_page(adap->port[0], gfp); + if (unlikely(!pg)) { + q->alloc_failed++; + break; + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + netdev_free_page(adap->port[0], pg); + goto out; + } + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(q))) { + smp_wmb(); + set_bit(q->cntxt_id, adap->sge.starving_fl); + } + + return cred; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW 
ring for status information + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size) +{ + size_t len = nelem * elem_size + stat_size; + void *s = NULL; + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size) { + s = kcalloc(nelem, sw_size, GFP_KERNEL); + + if (!s) { + dma_free_coherent(dev, len, p, *phys); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + memset(p, 0, len); + return p; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n > SGE_MAX_WR_LEN / 8); + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @skb: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; + if (skb_shinfo(skb)->gso_size) + flits += 2; + return flits; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + return flits_to_desc(calc_tx_flits(skb)); +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of bus addresses for the SGL elements + * + * Generates a gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. 
@sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)q->stat; + memcpy(q->desc, (u8 *)buf + part0, part1); + end = (void *)q->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) +{ + wmb(); /* write descriptors before telling HW */ + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into Tx descriptors + * @skb: the packet + * @q: the Tx queue where the packet will be inlined + * @pos: starting position in the Tx queue where to inline the packet + * + * Inline a packet's contents directly into Tx descriptors, starting at + * the given position within the Tx DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, + void *pos) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, q->desc, skb->len - left); + pos = (void *)q->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. 
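+ * For example, a TCP-over-IPv4 packet with CHECKSUM_PARTIAL set resolves to
+ * TX_CSUM_TCPIP together with the IP and Ethernet header lengths encoded in
+ * the returned control word.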
+ */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +static void eth_txq_stop(struct sge_eth_txq *q) +{ + netif_tx_stop_queue(q->txq); + q->q.stops++; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adap; + struct sge_eth_txq *q; + const struct port_info *pi; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. 
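+	 * (ETH_HLEN is 14 octets, comfortably above the 10-octet hardware
+	 * minimum.)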
+ */ + if (unlikely(skb->len < ETH_HLEN)) { +out_free: dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb_get_queue_mapping(skb); + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, true); + + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + + if (unlikely(credits < 0)) { + eth_txq_stop(q); + dev_err(adap->pdev_dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { + q->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + eth_txq_stop(q); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + wr = (void *)&q->q.desc[q->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = cpu_to_be64(0); + end = (u64 *)wr + flits; + + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso *lso = (void *)wr; + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(sizeof(*lso))); + lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); + lso->len = htonl(skb->len); + cpl = (void *)(lso + 1); + cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len); + q->tso++; + q->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(len)); + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + q->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + if (vlan_tx_tag_present(skb)) { + q->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); + cpl->pack = htons(0); + cpl->len = htons(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + if (is_eth_imm(skb)) { + inline_tx_skb(skb, &q->q, cpl + 1); + dev_kfree_skb(skb); + } else { + int last_desc; + + write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + skb_orphan(skb); + + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + } + + txq_advance(&q->q, ndesc); + + ring_tx_db(adap, &q->q, ndesc); + return NETDEV_TX_OK; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. 
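+ *
+ * Illustrative example: with q->cidx == 100, a hardware cidx of 4 and
+ * q->size == 128, (4 - 100) + 128 = 32 descriptors are reclaimed.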
+ */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_CTRL_WR_LEN; +} + +/** + * ctrlq_check_stop - check if a control queue is full and should stop + * @q: the queue + * @wr: most recent WR written to the queue + * + * Check if a control queue has become full and should be stopped. + * We clean up control queue descriptors very lazily, only when we are out. + * If the queue is still full after reclaiming any completed descriptors + * we suspend it and have the last WR wake it up. + */ +static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) +{ + reclaim_completed_tx_imm(&q->q); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + + if (unlikely(!is_imm(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_DROP; + } + + ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); + spin_lock(&q->sendq.lock); + + if (unlikely(q->full)) { + skb->priority = ndesc; /* save for restart */ + __skb_queue_tail(&q->sendq, skb); + spin_unlock(&q->sendq.lock); + return NET_XMIT_CN; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) + ctrlq_check_stop(q, wr); + + ring_tx_db(q->adap, &q->q, ndesc); + spin_unlock(&q->sendq.lock); + + kfree_skb(skb); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @data: the control queue to restart + * + * Resumes transmission on a suspended Tx control queue. + */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + unsigned int written = 0; + struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; + + spin_lock(&q->sendq.lock); + reclaim_completed_tx_imm(&q->q); + BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ + + while ((skb = __skb_dequeue(&q->sendq)) != NULL) { + struct fw_wr_hdr *wr; + unsigned int ndesc = skb->priority; /* previously saved */ + + /* + * Write descriptors and free skbs outside the lock to limit + * wait times. q->full is still set so new skbs will be queued. 
+ */ + spin_unlock(&q->sendq.lock); + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + kfree_skb(skb); + + written += ndesc; + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + unsigned long old = q->q.stops; + + ctrlq_check_stop(q, wr); + if (q->q.stops != old) { /* suspended anew */ + spin_lock(&q->sendq.lock); + goto ringdb; + } + } + if (written > 16) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + spin_lock(&q->sendq.lock); + } + q->full = 0; +ringdb: if (written) + ring_tx_db(q->adap, &q->q, written); + spin_unlock(&q->sendq.lock); +} + +/** + * t4_mgmt_tx - send a management message + * @adap: the adapter + * @skb: the packet containing the management message + * + * Send a management message through control queue 0. + */ +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); + local_bh_enable(); + return ret; +} + +/** + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN; +} + +/** + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8U; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb->tail != skb->transport_header) + cnt++; + return flits + sgl_len(cnt); +} + +/** + * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion + * @adap: the adapter + * @q: the queue to stop + * + * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting + * inability to map packets. A periodic timer attempts to restart + * queues so marked. + */ +static void txq_stop_maperr(struct sge_ofld_txq *q) +{ + q->mapping_err++; + q->q.stops++; + set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); +} + +/** + * ofldtxq_stop - stop an offload Tx queue that has become full + * @q: the queue to stop + * @skb: the packet causing the queue to become full + * + * Stops an offload Tx queue that has become full and modifies the packet + * being written to request a wakeup. + */ +static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; + + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; +} + +/** + * service_ofldq - restart a suspended offload queue + * @q: the offload queue + * + * Services an offload Tx queue by moving packets from its packet queue + * to the HW Tx ring. The function starts and ends with the queue locked. + */ +static void service_ofldq(struct sge_ofld_txq *q) +{ + u64 *pos; + int credits; + struct sk_buff *skb; + unsigned int written = 0; + unsigned int flits, ndesc; + + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { + /* + * We drop the lock but leave skb on sendq, thus retaining + * exclusive access to the state of the queue. 
+ */ + spin_unlock(&q->sendq.lock); + + reclaim_completed_tx(q->adap, &q->q, false); + + flits = skb->priority; /* previously saved */ + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + BUG_ON(credits < 0); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, skb); + + pos = (u64 *)&q->q.desc[q->q.pidx]; + if (is_ofld_imm(skb)) + inline_tx_skb(skb, &q->q, pos); + else if (map_skb(q->adap->pdev_dev, skb, + (dma_addr_t *)skb->head)) { + txq_stop_maperr(q); + spin_lock(&q->sendq.lock); + break; + } else { + int last_desc, hdr_len = skb_transport_offset(skb); + + memcpy(pos, skb->data, hdr_len); + write_sgl(skb, &q->q, (void *)pos + hdr_len, + pos + flits, hdr_len, + (dma_addr_t *)skb->head); +#ifdef CONFIG_NEED_DMA_MAP_STATE + skb->dev = q->adap->port[0]; + skb->destructor = deferred_unmap_destructor; +#endif + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + } + + txq_advance(&q->q, ndesc); + written += ndesc; + if (unlikely(written > 32)) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + + spin_lock(&q->sendq.lock); + __skb_unlink(skb, &q->sendq); + if (is_ofld_imm(skb)) + kfree_skb(skb); + } + if (likely(written)) + ring_tx_db(q->adap, &q->q, written); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. + */ +static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ + spin_lock(&q->sendq.lock); + __skb_queue_tail(&q->sendq, skb); + if (q->sendq.qlen == 1) + service_ofldq(q); + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ofldq - restart a suspended offload queue + * @data: the offload queue to restart + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_ofldq(unsigned long data) +{ + struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + + spin_lock(&q->sendq.lock); + q->full = 0; /* the queue actually is completely empty now */ + service_ofldq(q); + spin_unlock(&q->sendq.lock); +} + +/** + * skb_txq - return the Tx queue an offload packet should use + * @skb: the packet + * + * Returns the Tx queue an offload packet should use as indicated by bits + * 1-15 in the packet's queue_mapping. + */ +static inline unsigned int skb_txq(const struct sk_buff *skb) +{ + return skb->queue_mapping >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Returns whether an offload packet should use an OFLD or a CTRL + * Tx queue as indicated by bit 0 in the packet's queue_mapping. + */ +static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->queue_mapping & 1; +} + +static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + unsigned int idx = skb_txq(skb); + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + return ofld_xmit(&adap->sge.ofldtxq[idx], skb); +} + +/** + * t4_ofld_send - send an offload packet + * @adap: the adapter + * @skb: the packet + * + * Sends an offload packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. 
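+ *
+ * Illustrative example: a queue_mapping of 5 (binary 101) selects control
+ * queue 2, whereas a queue_mapping of 4 selects offload queue 2.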
+ */ +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ofld_send(adap, skb); + local_bh_enable(); + return ret; +} + +/** + * cxgb4_ofld_send - send an offload packet + * @dev: the net device + * @skb: the packet + * + * Sends an offload packet. This is an exported version of @t4_ofld_send, + * intended for ULDs. + */ +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) +{ + return t4_ofld_send(netdev2adap(dev), skb); +} +EXPORT_SYMBOL(cxgb4_ofld_send); + +static inline void copy_frags(struct skb_shared_info *ssi, + const struct pkt_gl *gl, unsigned int offset) +{ + unsigned int n; + + /* usually there's just one frag */ + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; + ssi->frags[0].size = gl->frags[0].size - offset; + ssi->nr_frags = gl->nfrags; + n = gl->nfrags - 1; + if (n) + memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[n].page); +} + +/** + * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. + */ +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + + /* + * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer + * size, which is expected since buffers are at least PAGE_SIZEd. + * In this case packets up to RX_COPY_THRES have only one fragment. + */ + if (gl->tot_len <= RX_COPY_THRES) { + skb = dev_alloc_skb(gl->tot_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = dev_alloc_skb(skb_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + copy_frags(skb_shinfo(skb), gl, pull_len); + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + } +out: return skb; +} +EXPORT_SYMBOL(cxgb4_pktgl_to_skb); + +/** + * t4_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +void t4_pktgl_free(const struct pkt_gl *gl) +{ + int n; + const skb_frag_t *p; + + for (p = gl->frags, n = gl->nfrags - 1; n--; p++) + put_page(p->page); +} + +/* + * Process an MPS trace packet. Give it an unused protocol number so it won't + * be delivered to anyone and send it to the stack for capture. 
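+ * The value used here is 0xffff, which no registered protocol handler claims.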
+ */ +static noinline int handle_trace_pkt(struct adapter *adap, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + struct cpl_trace_pkt *p; + + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + return 0; + } + + p = (struct cpl_trace_pkt *)skb->data; + __skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + skb->protocol = htons(0xffff); + skb->dev = adap->port[0]; + netif_receive_skb(skb); + return 0; +} + +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); + skb->len = gl->tot_len - RX_PKT_PAD; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + + if (unlikely(pkt->vlan_ex)) { + struct port_info *pi = netdev_priv(rxq->rspq.netdev); + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) { + ret = vlan_gro_frags(&rxq->rspq.napi, grp, + ntohs(pkt->vlan)); + goto stats; + } + } + ret = napi_gro_frags(&rxq->rspq.napi); +stats: if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + bool csum_ok; + struct sk_buff *skb; + struct port_info *pi; + const struct cpl_rx_pkt *pkt; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) + return handle_trace_pkt(q->adap, si); + + pkt = (void *)&rsp[1]; + csum_ok = pkt->csum_calc && !pkt->err_vec; + if ((pkt->l2info & htonl(RXF_TCP)) && + (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { + do_gro(rxq, si, pkt); + return 0; + } + + skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(si); + rxq->stats.rx_drops++; + return 0; + } + + __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ + skb->protocol = eth_type_trans(skb, q->netdev); + skb_record_rx_queue(skb, q->idx); + pi = netdev_priv(skb->dev); + rxq->stats.pkts++; + + if (csum_ok && (pi->rx_offload & RX_CSO) && + (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(pkt->vlan_ex)) { + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) + vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); + else + dev_kfree_skb_any(skb); + } else + netif_receive_skb(skb); + + return 0; +} + +/** + * restore_rx_bufs - put back a packet's Rx buffers + * @si: the packet gather list + * @q: the SGE free list + * @frags: number of FL buffers to restore + * + * Puts back on an FL the Rx buffers associated with @si. 
The buffers + * have already been unmapped and are left unmapped, we mark them so to + * prevent further unmapping attempts. + * + * This function undoes a series of @unmap_rx_buf calls when we find out + * that the current packet can't be processed right away afterall and we + * need to come back to it later. This is a very rare event and there's + * no effort to make this particularly efficient. + */ +static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, + int frags) +{ + struct rx_sw_desc *d; + + while (frags--) { + if (q->cidx == 0) + q->cidx = q->size - 1; + else + q->cidx--; + d = &q->sdesc[q->cidx]; + d->page = si->frags[frags].page; + d->dma_addr |= RX_UNMAPPED_BUF; + q->avail++; + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *r, + const struct sge_rspq *q) +{ + return RSPD_GEN(r->type_gen) == q->gen; +} + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (void *)q->cur_desc + q->iqe_len; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget) +{ + int ret, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + while (likely(budget_left)) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + skb_frag_t *fp; + struct pkt_gl si; + const struct rx_sw_desc *rsd; + u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; + + if (len & RSPD_NEWBUF) { + if (likely(q->offset > 0)) { + free_rx_bufs(q->adap, &rxq->fl, 1); + q->offset = 0; + } + len &= RSPD_LEN; + } + si.tot_len = len; + + /* gather packet fragments */ + for (frags = 0, fp = si.frags; ; frags++, fp++) { + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(rsd); + fp->page = rsd->page; + fp->page_offset = q->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(q->adap, &rxq->fl); + } + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access. 
+ */ + dma_sync_single_for_cpu(q->adap->pdev_dev, + get_buf_addr(rsd), + fp->size, DMA_FROM_DEVICE); + + si.va = page_address(si.frags[0].page) + + si.frags[0].page_offset; + prefetch(si.va); + + si.nfrags = frags + 1; + ret = q->handler(q, q->cur_desc, &si); + if (likely(ret == 0)) + q->offset += ALIGN(fp->size, FL_ALIGN); + else + restore_rx_bufs(&si, &rxq->fl, frags); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + } + + if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + __refill_fl(q->adap, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for Rx processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * in not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int params; + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(q, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } else + params = QINTR_TIMER_IDX(7); + + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | + INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue. + */ +irqreturn_t t4_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *q = cookie; + + napi_schedule(&q->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adap) +{ + unsigned int credits; + const struct rsp_ctrl *rc; + struct sge_rspq *q = &adap->sge.intrq; + + spin_lock(&adap->sge.intrq_lock); + for (credits = 0; ; credits++) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { + unsigned int qid = ntohl(rc->pldbuflen_qid); + + napi_schedule(&adap->sge.ingr_map[qid]->napi); + } + + rspq_next(q); + } + + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | + INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); + spin_unlock(&adap->sge.intrq_lock); + return credits; +} + +/* + * The MSI interrupt handler, which handles data events from SGE response queues + * as well as error and other async events as they all use the same MSI vector. + */ +static irqreturn_t t4_intr_msi(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_slow_intr_handler(adap); + process_intrq(adap); + return IRQ_HANDLED; +} + +/* + * Interrupt handler for legacy INTx interrupts. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt line. 
+ */ +static irqreturn_t t4_intr_intx(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); + if (t4_slow_intr_handler(adap) | process_intrq(adap)) + return IRQ_HANDLED; + return IRQ_NONE; /* probably shared interrupt */ +} + +/** + * t4_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or INTx). + */ +irq_handler_t t4_intr_handler(struct adapter *adap) +{ + if (adap->flags & USING_MSIX) + return t4_sge_intr_msix; + if (adap->flags & USING_MSI) + return t4_intr_msi; + return t4_intr_intx; +} + +static void sge_rx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, cnt[2]; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) + for (m = s->starving_fl[i]; m; m &= m - 1) { + struct sge_eth_rxq *rxq; + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_clear_bit(); + + if (fl_starving(fl)) { + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + + t4_write_reg(adap, SGE_DEBUG_INDEX, 13); + cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); + cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + + for (i = 0; i < 2; i++) + if (cnt[i] >= s->starve_thres) { + if (s->idma_state[i] || cnt[i] == 0xffffffff) + continue; + s->idma_state[i] = 1; + t4_write_reg(adap, SGE_DEBUG_INDEX, 11); + m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); + dev_warn(adap->pdev_dev, + "SGE idma%u starvation detected for " + "queue %lu\n", i, m & 0xffff); + } else if (s->idma_state[i]) + s->idma_state[i] = 0; + + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +static void sge_tx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, budget; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) + for (m = s->txq_maperr[i]; m; m &= m - 1) { + unsigned long id = __ffs(m) + i * BITS_PER_LONG; + struct sge_ofld_txq *txq = s->egr_map[id]; + + clear_bit(id, s->txq_maperr); + tasklet_schedule(&txq->qresume_tsk); + } + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *q = &s->ethtxq[i]; + + if (q->q.in_use && + time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && + __netif_tx_trylock(q->txq)) { + int avail = reclaimable(&q->q); + + if (avail) { + if (avail > budget) + avail = budget; + + free_tx_desc(adap, &q->q, avail, true); + q->q.in_use -= avail; + budget -= avail; + } + __netif_tx_unlock(q->txq); + } + + if (++i >= s->ethqsets) + i = 0; + } while (budget && i != s->ethtxq_rover); + s->ethtxq_rover = i; + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Size needs to be multiple of 16, including status entry. 
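+	 * e.g. a requested size of 1000 would be rounded up to 1008.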
*/ + iq->size = roundup(iq->size, 16); + + iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, + &iq->phys_addr, NULL, 0); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | + FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + fl->size = roundup(fl->size, 8); + fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), &fl->addr, + &fl->sdesc, STAT_LEN); + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0PADEN); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | + FW_IQ_CMD_FL0FBMAX(3)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) + goto err; + + netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->size--; /* subtract status entry */ + iq->adap = adap; + iq->netdev = dev; + iq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 
0 : -1; + + adap->sge.ingr_map[iq->cntxt_id] = iq; + + if (fl) { + fl->cntxt_id = htons(c.fl0id); + fl->avail = fl->pend_cred = 0; + fl->pidx = fl->cidx = 0; + fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; + adap->sge.egr_map[fl->cntxt_id] = fl; + refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); + } + return 0; + +fl_nomem: + ret = -ENOMEM; +err: + if (iq->desc) { + dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, + iq->desc, iq->phys_addr); + iq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->in_use = 0; + q->cidx = q->pidx = 0; + q->stops = q->restarts = 0; + q->stat = (void *)&q->desc[q->size]; + q->cntxt_id = id; + adap->sge.egr_map[id] = q; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | + FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | + FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | + FW_EQ_ETH_CMD_FBMAX(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH(5) | + FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->txq = netdevq; + txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, nentries, + sizeof(struct tx_desc), 0, &txq->q.phys_addr, + NULL, 0); + if (!txq->q.desc) + return -ENOMEM; + + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | + FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | + FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | + 
FW_EQ_CTRL_CMD_FBMAX(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | + FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); + txq->full = 0; + return 0; +} + +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_ofld_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | + FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | + FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_OFLD_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | + FW_EQ_OFLD_CMD_FBMAX(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | + FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); + txq->full = 0; + txq->mapping_err = 0; + return 0; +} + +static void free_txq(struct adapter *adap, struct sge_txq *q) +{ + dma_free_coherent(adap->pdev_dev, + q->size * sizeof(struct tx_desc) + STAT_LEN, + q->desc, q->phys_addr); + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + adap->sge.ingr_map[rq->cntxt_id] = NULL; + t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, + 0xffff); + dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, + rq->desc, rq->phys_addr); + netif_napi_del(&rq->napi); + rq->netdev = NULL; + rq->cntxt_id = rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(adap, fl, fl->avail); + dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
+ */ +void t4_free_sge_resources(struct adapter *adap) +{ + int i; + struct sge_eth_rxq *eq = adap->sge.ethrxq; + struct sge_eth_txq *etq = adap->sge.ethtxq; + struct sge_ofld_rxq *oq = adap->sge.ofldrxq; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { + if (eq->rspq.desc) + free_rspq_fl(adap, &eq->rspq, &eq->fl); + if (etq->q.desc) { + t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); + free_tx_desc(adap, &etq->q, etq->q.in_use, true); + kfree(etq->q.sdesc); + free_txq(adap, &etq->q); + } + } + + /* clean up RDMA and iSCSI Rx queues */ + for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + + /* clean up offload Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { + struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; + + if (q->q.desc) { + tasklet_kill(&q->qresume_tsk); + t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); + free_tx_desc(adap, &q->q, q->q.in_use, false); + kfree(q->q.sdesc); + __skb_queue_purge(&q->sendq); + free_txq(adap, &q->q); + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + tasklet_kill(&cq->qresume_tsk); + t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); + __skb_queue_purge(&cq->sendq); + free_txq(adap, &cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); + + if (adap->sge.intrq.desc) + free_rspq_fl(adap, &adap->sge.intrq, NULL); + + /* clear the reverse egress queue map */ + memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); +} + +void t4_sge_start(struct adapter *adap) +{ + adap->sge.ethtxq_rover = 0; + mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4_sge_stop - disable SGE operation + * @adap: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. + */ +void t4_sge_stop(struct adapter *adap) +{ + int i; + struct sge *s = &adap->sge; + + if (in_interrupt()) /* actions below require waiting */ + return; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { + struct sge_ofld_txq *q = &s->ofldtxq[i]; + + if (q->q.desc) + tasklet_kill(&q->qresume_tsk); + } + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { + struct sge_ctrl_txq *cq = &s->ctrlq[i]; + + if (cq->q.desc) + tasklet_kill(&cq->qresume_tsk); + } +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queues here, instead the driver + * top-level must request them individually. + */ +void t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + unsigned int fl_align_log = ilog2(FL_ALIGN); + + t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, + INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | + RXPKTCPLMODE | + (STAT_LEN == 128 ? 
EGRSTATUSPAGESIZE : 0)); + t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, + HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); +#if FL_PG_ORDER > 0 + t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); +#endif + t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, + THRESHOLD_0(s->counter_val[0]) | + THRESHOLD_1(s->counter_val[1]) | + THRESHOLD_2(s->counter_val[2]) | + THRESHOLD_3(s->counter_val[3])); + t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); + t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); + t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_state[0] = s->idma_state[1] = 0; + spin_lock_init(&s->intrq_lock); +} diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c new file mode 100644 index 00000000000..a814a3afe12 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.c @@ -0,0 +1,3131 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/init.h> +#include <linux/delay.h> +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void) t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
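+ *
+ * Illustrative sketch of fetching a reply (not a complete routine; the
+ * register offset comes from t4_wr_mbox_meat() below and the mailbox is
+ * assumed to already hold a valid reply):
+ *
+ *	__be64 rpl[MBOX_LEN / 8];
+ *	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
+ *
+ *	get_mbox_rpl(adap, rpl, ARRAY_SIZE(rpl), data_reg);
+ *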
+ */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. + */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + dev_alert(adap->pdev_dev, + "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), + ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); +} + +static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +{ + dev_err(adap->pdev_dev, + "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); +} + +/** + * t4_wr_mbox_meat - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms + * to respond. @sleep_ok determines whether we may sleep while awaiting + * the response. If sleeping is allowed we use progressive backoff + * otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 + }; + + u32 v; + u64 res; + int i, ms, delay_idx; + const __be64 *p = cmd; + u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); + u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); + + if ((size & 15) || size > MBOX_LEN) + return -EINVAL; + + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + + if (v != MBOX_OWNER_DRV) + return v ? 
-EBUSY : -ETIMEDOUT; + + for (i = 0; i < size; i += 8) + t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); + + t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); + t4_read_reg(adap, ctl_reg); /* flush write */ + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + v = t4_read_reg(adap, ctl_reg); + if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { + if (!(v & MBMSGVALID)) { + t4_write_reg(adap, ctl_reg, 0); + continue; + } + + res = t4_read_reg64(adap, data_reg); + if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { + fw_asrt(adap, data_reg); + res = FW_CMD_RETVAL(EIO); + } else if (rpl) + get_mbox_rpl(adap, rpl, size / 8, data_reg); + + if (FW_CMD_RETVAL_GET((int)res)) + dump_mbox(adap, mbox, data_reg); + t4_write_reg(adap, ctl_reg, 0); + return -FW_CMD_RETVAL_GET((int)res); + } + } + + dump_mbox(adap, mbox, data_reg); + dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", + *(const u8 *)cmd, mbox); + return -ETIMEDOUT; +} + +/** + * t4_mc_read - read from MC through backdoor accesses + * @adap: the adapter + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from MC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. + */ +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) + return -EBUSY; + t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); + t4_write_reg(adap, MC_BIST_CMD_LEN, 64); + t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); + t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | + BIST_CMD_GAP(1)); + i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); + if (i) + return i; + +#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, MC_DATA(16)); +#undef MC_DATA + return 0; +} + +/** + * t4_edc_read - read from EDC through backdoor accesses + * @adap: the adapter + * @idx: which EDC to access + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from EDC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
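+ *
+ * (The parameter is named @ecc in the prototype below.)  A minimal usage
+ * sketch, with an arbitrary example address and no error handling:
+ *
+ *	__be32 buf[16];
+ *	u64 ecc;
+ *
+ *	ret = t4_edc_read(adap, 0, addr, buf, &ecc);
+ *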
+ */ +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + idx *= EDC_STRIDE; + if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) + return -EBUSY; + t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); + t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); + t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); + t4_write_reg(adap, EDC_BIST_CMD + idx, + BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); + i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +#define VPD_ENTRY(name, len) \ + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t4_vpd { + u8 id_tag; + u8 id_len[2]; + u8 id_data[ID_LEN]; + u8 vpdr_tag; + u8 vpdr_len[2]; + VPD_ENTRY(pn, 16); /* part number */ + VPD_ENTRY(ec, EC_LEN); /* EC level */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ + VPD_ENTRY(na, 12); /* MAC address base */ + VPD_ENTRY(port_type, 8); /* port types */ + VPD_ENTRY(gpio, 14); /* GPIO usage */ + VPD_ENTRY(cclk, 6); /* core clock */ + VPD_ENTRY(port_addr, 8); /* port MDIO addresses */ + VPD_ENTRY(rv, 1); /* csum */ + u32 pad; /* for multiple-of-4 sizing and alignment */ +}; + +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0 + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, bool enable) +{ + unsigned int v = enable ? 0xc : 0; + int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); + return ret < 0 ? ret : 0; +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. 
+ */ +static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + int ret; + struct t4_vpd vpd; + u8 *q = (u8 *)&vpd, csum; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); + if (ret < 0) + return ret; + + for (csum = 0; q <= vpd.rv_data; q++) + csum += *q; + + if (csum) { + dev_err(adapter->pdev_dev, + "corrupted VPD EEPROM, actual csum %u\n", csum); + return -EINVAL; + } + + p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); + memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); + strim(p->id); + memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); + strim(p->ec); + memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); + strim(p->sn); + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_START_SEC = 8, /* first flash sector for FW */ + FW_END_SEC = 15, /* last flash sector for FW */ + FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, + FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_DATA, val); + t4_write_reg(adapter, SF_OP, lock | + cont | BYTECNT(byte_cnt - 1) | OP_WR); + return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. 
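+ *
+ * Callers issue a flash command first and then poll, along the lines of
+ * the page-program path below (sketch only, error handling omitted):
+ *
+ *	sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE);
+ *	sf1_write(adapter, 4, 1, 1, swab32(addr) | SF_PROG_PAGE);
+ *	...			feed up to SF_PAGE_SIZE bytes of data
+ *	ret = flash_wait_op(adapter, 5, 1);
+ *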
+ */ +static int flash_wait_op(struct adapter *adapter, int attempts, int delay) +{ + int ret; + u32 status; + + while (1) { + if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || + (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) + return ret; + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianess. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || + (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = htonl(*data); + } + return 0; +} + +/** + * t4_write_flash - write up to a page of data to the serial flash + * @adapter: the adapter + * @addr: the start address to write + * @n: length of data to write in bytes + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. All the data must be written to the same page. + */ +static int t4_write_flash(struct adapter *adapter, unsigned int addr, + unsigned int n, const u8 *data) +{ + int ret; + u32 buf[64]; + unsigned int i, c, left, val, offset = addr & 0xff; + + if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) + goto unlock; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = sf1_write(adapter, c, c != left, 1, val); + if (ret) + goto unlock; + } + ret = flash_wait_op(adapter, 5, 1); + if (ret) + goto unlock; + + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + + /* Read the page to verify the write succeeded */ + ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (u8 *)buf + offset, n)) { + dev_err(adapter->pdev_dev, + "failed to correctly write the flash page at %#x\n", + addr); + return -EIO; + } + return 0; + +unlock: + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + return ret; +} + +/** + * get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +static int get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, + FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/** + * get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. 
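+ *
+ * As with get_fw_version() above, the version word sits inside the
+ * struct fw_hdr at the start of the FW flash region; t4_check_fw_version()
+ * below reads it straight into the adapter parameters:
+ *
+ *	ret = get_tp_version(adapter, &adapter->params.tp_vers);
+ *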
+ */ +static int get_tp_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, + tp_microcode_ver), + 1, vers, 0); +} + +/** + * t4_check_fw_version - check if the FW is compatible with this driver + * @adapter: the adapter + * + * Checks if an adapter's FW is compatible with the driver. Returns 0 + * if there's exact match, a negative error if the version could not be + * read or there's a major version mismatch, and a positive value if the + * expected major version is found but there's a minor version mismatch. + */ +int t4_check_fw_version(struct adapter *adapter) +{ + u32 api_vers[2]; + int ret, major, minor, micro; + + ret = get_fw_version(adapter, &adapter->params.fw_vers); + if (!ret) + ret = get_tp_version(adapter, &adapter->params.tp_vers); + if (!ret) + ret = t4_read_flash(adapter, + FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), + 2, api_vers, 1); + if (ret) + return ret; + + major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); + minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); + micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); + memcpy(adapter->params.api_vers, api_vers, + sizeof(adapter->params.api_vers)); + + if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + dev_err(adapter->pdev_dev, + "card FW has major version %u, driver wants %u\n", + major, FW_VERSION_MAJOR); + return -EINVAL; + } + + if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + return 0; /* perfect match */ + + /* Minor/micro version mismatch. Report it but often it's OK. */ + return 1; +} + +/** + * t4_flash_erase_sectors - erase a range of flash sectors + * @adapter: the adapter + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) +{ + int ret = 0; + + while (start <= end) { + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8))) != 0 || + (ret = flash_wait_op(adapter, 5, 500)) != 0) { + dev_err(adapter->pdev_dev, + "erase of flash sector %d failed, error %d\n", + start, ret); + break; + } + start++; + } + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + return ret; +} + +/** + * t4_load_fw - download firmware + * @adap: the adapter + * @fw_data: the firmware image to write + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. 
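+ *
+ * The image must be a whole number of 512-byte blocks, must match the
+ * length recorded in its own fw_hdr, and must sum to 0xffffffff when
+ * added up as big-endian 32-bit words.  A sketch of that checksum test
+ * (the same check is applied below):
+ *
+ *	const u32 *p = (const u32 *)fw_data;
+ *	u32 csum = 0;
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < size / sizeof(csum); i++)
+ *		csum += ntohl(p[i]);
+ *	if (csum != 0xffffffff)
+ *		return -EINVAL;
+ *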
+ */ +int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) +{ + u32 csum; + int ret, addr; + unsigned int i; + u8 first_page[SF_PAGE_SIZE]; + const u32 *p = (const u32 *)fw_data; + const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; + + if (!size) { + dev_err(adap->pdev_dev, "FW image has no data\n"); + return -EINVAL; + } + if (size & 511) { + dev_err(adap->pdev_dev, + "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + if (ntohs(hdr->len512) * 512 != size) { + dev_err(adap->pdev_dev, + "FW image size differs from size in FW header\n"); + return -EINVAL; + } + if (size > FW_MAX_SIZE) { + dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", + FW_MAX_SIZE); + return -EFBIG; + } + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + dev_err(adap->pdev_dev, + "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ + ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); + if (ret) + goto out; + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. + */ + memcpy(first_page, fw_data, SF_PAGE_SIZE); + ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); + ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); + if (ret) + goto out; + + addr = FW_IMG_START; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + fw_data += SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); + if (ret) + goto out; + } + + ret = t4_write_flash(adap, + FW_IMG_START + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); +out: + if (ret) + dev_err(adap->pdev_dev, "firmware download failed, error %d\n", + ret); + return ret; +} + +#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) + +/** + * t4_link_start - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
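+ *
+ * (Despite the @phy/@mac names above, the parameters are the adapter,
+ * the FW mailbox to use, the port id and the link configuration.)  A
+ * typical caller fills in the requested settings first, e.g. (sketch;
+ * mbox and port are whatever the caller owns):
+ *
+ *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *	lc->autoneg = AUTONEG_ENABLE;
+ *	ret = t4_link_start(adap, mbox, port, lc);
+ *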
+ */ +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc) +{ + struct fw_port_cmd c; + unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + + lc->link_ok = 0; + if (lc->requested_fc & PAUSE_RX) + fc |= FW_PORT_CAP_FC_RX; + if (lc->requested_fc & PAUSE_TX) + fc |= FW_PORT_CAP_FC_TX; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else if (lc->autoneg == AUTONEG_DISABLE) { + c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else + c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_restart_aneg - restart autonegotiation + * @adap: the adapter + * @mbox: mbox to use for the FW command + * @port: the port id + * + * Restarts autonegotiation for the selected port. + */ +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) +{ + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_set_vlan_accel - configure HW VLAN extraction + * @adap: the adapter + * @ports: bitmap of adapter ports to operate on + * @on: enable (1) or disable (0) HW VLAN extraction + * + * Enables or disables HW extraction of VLAN tags for the ports specified + * by @ports. @ports is a bitmap with the ith bit designating the port + * associated with the ith adapter channel. + */ +void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on) +{ + ports <<= VLANEXTENABLE_SHIFT; + t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0); +} + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/** + * t4_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occured. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. 
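+ *
+ * A module interrupt handler therefore reduces to a table plus a single
+ * call; a sketch in the style of the handlers below (the register and
+ * bit names here are placeholders, not real T4 symbols):
+ *
+ *	static struct intr_info my_intr_info[] = {
+ *		{ MY_PARITY_ERR, "module parity error", -1, 1 },
+ *		{ 0 }
+ *	};
+ *
+ *	if (t4_handle_intr_status(adap, MY_INT_CAUSE, my_intr_info))
+ *		t4_fatal_err(adap);
+ *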
+ */ +static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = t4_read_reg(adapter, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + } else if (acts->msg && printk_ratelimit()) + dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + t4_write_reg(adapter, reg, status); + return fatal; +} + +/* + * Interrupt handler for the PCIE module. + */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR, "PCI Rx write parity error", -1, 1 }, + { RPLPERR, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT, "PCI core secondary fault", -1, 1 }, + { PCIEPINT, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + 
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) + t4_fatal_err(adapter); +} + +/* + * SGE interrupt handler. + */ +static void sge_intr_handler(struct adapter *adapter) +{ + u64 v; + + static struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, + { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, + { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, + { 0 } + }; + + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | + ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); + if (v) { + dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", + (unsigned long long)v); + t4_write_reg(adapter, SGE_INT_CAUSE1, v); + t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); + } + + if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || + v != 0) + t4_fatal_err(adapter); +} + +/* + * CIM interrupt handler. 
+ */ +static void cim_intr_handler(struct adapter *adapter) +{ + static struct intr_info cim_intr_info[] = { + { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, + { OBQPARERR, "CIM OBQ parity error", -1, 1 }, + { IBQPARERR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, + { 0 } + }; + static struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, + { ILLWRINT, "CIM illegal write", -1, 1 }, + { ILLRDINT, "CIM illegal read", -1, 1 }, + { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, + { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, + cim_intr_info) + + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, + cim_upintr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static struct intr_info ulprx_intr_info[] = { + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM TX interrupt handler. 
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, + { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, + { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, + { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, + { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, + { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, + { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) + t4_fatal_err(adapter); +} + +/* + * LE interrupt handler. + */ +static void le_intr_handler(struct adapter *adap) +{ + static struct intr_info le_intr_info[] = { + { LIPMISS, "LE LIP miss", -1, 0 }, + { LIP0, "LE 0 LIP error", -1, 0 }, + { PARITYERR, "LE parity error", -1, 1 }, + { UNKNOWNCMD, "LE unknown command", -1, 1 }, + { REQQPARERR, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) + t4_fatal_err(adap); +} + +/* + * MPS interrupt handler. 
+ */ +static void mps_intr_handler(struct adapter *adapter) +{ + static struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_tx_intr_info[] = { + { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, + { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, + { BUBBLE, "MPS Tx underflow", -1, 1 }, + { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR, "MPS Tx framing error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_trc_intr_info[] = { + { FILTMEM, "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, + { MISCPERR, "MPS TRC misc parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, + mps_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, + mps_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, + mps_trc_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, + mps_stat_sram_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, + mps_stat_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, + mps_stat_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, + mps_cls_intr_info); + + t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | + RXINT | TXINT | STATINT); + t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ + if (fat) + t4_fatal_err(adapter); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) + +/* + * EDC/MC interrupt handler. + */ +static void mem_intr_handler(struct adapter *adapter, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); + } else { + addr = MC_INT_CAUSE; + cnt_addr = MC_ECC_STATUS; + } + + v = t4_read_reg(adapter, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE) + dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", + name[idx]); + if (v & ECC_CE_INT_CAUSE) { + u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); + + t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); + if (printk_ratelimit()) + dev_warn(adapter->pdev_dev, + "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE) + dev_alert(adapter->pdev_dev, + "%s uncorrectable ECC data error\n", name[idx]); + + t4_write_reg(adapter, addr, v); + if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) + t4_fatal_err(adapter); +} + +/* + * MA interrupt handler. 
+ */ +static void ma_intr_handler(struct adapter *adap) +{ + u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); + + if (status & MEM_PERR_INT_CAUSE) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (status & MEM_WRAP_INT_CAUSE) { + v = t4_read_reg(adap, MA_INT_WRAP_STATUS); + dev_alert(adap->pdev_dev, "MA address wrap-around error by " + "client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_GET(v), + MEM_WRAP_ADDRESS_GET(v) << 4); + } + t4_write_reg(adap, MA_INT_CAUSE, status); + t4_fatal_err(adap); +} + +/* + * SMB interrupt handler. + */ +static void smb_intr_handler(struct adapter *adap) +{ + static struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) + t4_fatal_err(adap); +} + +/* + * NC-SI interrupt handler. + */ +static void ncsi_intr_handler(struct adapter *adap) +{ + static struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) + t4_fatal_err(adap); +} + +/* + * XGMAC interrupt handler. + */ +static void xgmac_intr_handler(struct adapter *adap, int port) +{ + u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + + v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", + port); + if (v & RXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", + port); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); + t4_fatal_err(adap); +} + +/* + * PL interrupt handler. + */ +static void pl_intr_handler(struct adapter *adap) +{ + static struct intr_info pl_intr_info[] = { + { FATALPERR, "T4 fatal parity error", -1, 1 }, + { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) + t4_fatal_err(adap); +} + +#define PF_INTR_MASK (PFSW | PFCIM) +#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ + EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ + CPL_SWITCH | SGE | ULP_TX) + +/** + * t4_slow_intr_handler - control path interrupt handler + * @adapter: the adapter + * + * T4 interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. 
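+ *
+ * A top-level interrupt routine would typically try this first and only
+ * then fall back to data-path work, roughly (sketch only; the actual
+ * caller lives outside this file):
+ *
+ *	if (t4_slow_intr_handler(adap))
+ *		return IRQ_HANDLED;
+ *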
+ */ +int t4_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); + + if (!(cause & GLBL_INTR_MASK)) + return 0; + if (cause & CIM) + cim_intr_handler(adapter); + if (cause & MPS) + mps_intr_handler(adapter); + if (cause & NCSI) + ncsi_intr_handler(adapter); + if (cause & PL) + pl_intr_handler(adapter); + if (cause & SMB) + smb_intr_handler(adapter); + if (cause & XGMAC0) + xgmac_intr_handler(adapter, 0); + if (cause & XGMAC1) + xgmac_intr_handler(adapter, 1); + if (cause & XGMAC_KR0) + xgmac_intr_handler(adapter, 2); + if (cause & XGMAC_KR1) + xgmac_intr_handler(adapter, 3); + if (cause & PCIE) + pcie_intr_handler(adapter); + if (cause & MC) + mem_intr_handler(adapter, MEM_MC); + if (cause & EDC0) + mem_intr_handler(adapter, MEM_EDC0); + if (cause & EDC1) + mem_intr_handler(adapter, MEM_EDC1); + if (cause & LE) + le_intr_handler(adapter); + if (cause & TP) + tp_intr_handler(adapter); + if (cause & MA) + ma_intr_handler(adapter); + if (cause & PM_TX) + pmtx_intr_handler(adapter); + if (cause & PM_RX) + pmrx_intr_handler(adapter); + if (cause & ULP_RX) + ulprx_intr_handler(adapter); + if (cause & CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & SGE) + sge_intr_handler(adapter); + if (cause & ULP_TX) + ulptx_intr_handler(adapter); + + /* Clear the interrupts just processed for which we are the master. */ + t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ + return 1; +} + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | + ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | + ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | + ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | + ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | + ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | + ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | + EGRESS_SIZE_ERR); + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. + */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_intr_clear - clear all interrupts + * @adapter: the adapter whose interrupts should be cleared + * + * Clears all interrupts. The caller must be a PCI function managing + * global interrupts. 
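+ *
+ * During initialization this is normally paired with t4_intr_enable()
+ * (sketch; only the PCI function that owns the global interrupts does
+ * this):
+ *
+ *	t4_intr_clear(adap);
+ *	t4_intr_enable(adap);
+ *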
+ */ +void t4_intr_clear(struct adapter *adapter) +{ + static const unsigned int cause_reg[] = { + SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + PCIE_NONFAT_ERR, PCIE_INT_CAUSE, + MC_INT_CAUSE, + MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE, + EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1), + CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE, + MYPF_REG(CIM_PF_HOST_INT_CAUSE), + TP_INT_CAUSE, + ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE, + PM_RX_INT_CAUSE, PM_TX_INT_CAUSE, + MPS_RX_PERR_INT_CAUSE, + CPL_INTR_CAUSE, + MYPF_REG(PL_PF_INT_CAUSE), + PL_PL_INT_CAUSE, + LE_DB_INT_CAUSE, + }; + + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) + t4_write_reg(adapter, cause_reg[i], 0xffffffff); + + t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the response queue lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE | + FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = htonl(FW_LEN16(cmd)); + + /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ + while (n > 0) { + int nq = min(n, 32); + __be32 *qp = &cmd.iq0_to_iq2; + + cmd.niqid = htons(nq); + cmd.startidx = htons(start); + + start += nq; + n -= nq; + + while (nq > 0) { + unsigned int v; + + v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + + *qp++ = htonl(v); + nq -= 3; + } + + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4_config_glbl_rss - configure the global RSS mode + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @mode: global RSS mode + * @flags: mode-specific flags + * + * Sets the global RSS mode. 
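+ *
+ * Usage sketch (the flag word carries the mode-specific hash options;
+ * it is shown as 0 here purely for illustration):
+ *
+ *	ret = t4_config_glbl_rss(adap, mbox,
+ *				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 0);
+ *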
+ */ +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags) +{ + struct fw_rss_glb_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.retval_len16 = htonl(FW_LEN16(c)); + if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + c.u.basicvirtual.mode_pkd = + htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); + } else + return -EINVAL; + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/* Read an RSS table row */ +static int rd_rss_row(struct adapter *adap, int row, u32 *val) +{ + t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row); + return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1, + 5, 0, val); +} + +/** + * t4_read_rss - read the contents of the RSS mapping table + * @adapter: the adapter + * @map: holds the contents of the RSS mapping table + * + * Reads the contents of the RSS hash->queue mapping table. + */ +int t4_read_rss(struct adapter *adapter, u16 *map) +{ + u32 val; + int i, ret; + + for (i = 0; i < RSS_NENTRIES / 2; ++i) { + ret = rd_rss_row(adapter, i, &val); + if (ret) + return ret; + *map++ = LKPTBLQUEUE0_GET(val); + *map++ = LKPTBLQUEUE1_GET(val); + } + return 0; +} + +/** + * t4_tp_get_tcp_stats - read TP's TCP MIB counters + * @adap: the adapter + * @v4: holds the TCP/IP counter values + * @v6: holds the TCP/IPv6 counter values + * + * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. + * Either @v4 or @v6 may be %NULL to skip the corresponding stats. + */ +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; + +#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) +#define STAT(x) val[STAT_IDX(x)] +#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) + + if (v4) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); + v4->tcpOutRsts = STAT(OUT_RST); + v4->tcpInSegs = STAT64(IN_SEG); + v4->tcpOutSegs = STAT64(OUT_SEG); + v4->tcpRetransSegs = STAT64(RXT_SEG); + } + if (v6) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); + v6->tcpOutRsts = STAT(OUT_RST); + v6->tcpInSegs = STAT64(IN_SEG); + v6->tcpOutSegs = STAT64(OUT_SEG); + v6->tcpRetransSegs = STAT64(RXT_SEG); + } +#undef STAT64 +#undef STAT +#undef STAT_IDX +} + +/** + * t4_tp_get_err_stats - read TP's error MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's error counters. 
+ */ +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +{ + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs, + 12, TP_MIB_MAC_IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops, + 8, TP_MIB_TNL_CNG_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops, + 4, TP_MIB_TNL_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops, + 4, TP_MIB_OFD_VLN_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs, + 4, TP_MIB_TCP_V6IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh, + 2, TP_MIB_OFD_ARP_DROP); +} + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, TP_MTU_TABLE, + MTUINDEX(0xff) | MTUVALUE(i)); + v = t4_read_reg(adap, TP_MTU_TABLE); + mtus[i] = MTUVALUE_GET(v); + if (mtu_log) + mtu_log[i] = MTUWIDTH_GET(v); + } +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. + */ +static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. 
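+ *
+ * For each MTU and congestion-control window the additive increment is
+ *
+ *	inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR)
+ *
+ * so, for example, an MTU of 1500 with alpha[w] = 1 in the smallest
+ * window (avg_pkts[w] = 2) gives (1460 * 1) / 2 = 730.
+ *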
+ */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | + MTUWIDTH(log2) | MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_set_trace_filter - configure one of the tracing filters + * @adap: the adapter + * @tp: the desired trace filter parameters + * @idx: which filter to configure + * @enable: whether to enable or disable the filter + * + * Configures one of the tracing filters available in HW. If @enable is + * %0 @tp is not examined and may be %NULL. + */ +int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, + int idx, int enable) +{ + int i, ofst = idx * 4; + u32 data_reg, mask_reg, cfg; + u32 multitrc = TRCMULTIFILTER; + + if (!enable) { + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + goto out; + } + + if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f || + tp->skip_ofst > 0x1f || tp->min_len > 0x1ff || + tp->snap_len > 9600 || (idx && tp->snap_len > 256)) + return -EINVAL; + + if (tp->snap_len > 256) { /* must be tracer 0 */ + if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN) + return -EINVAL; /* other tracers are enabled */ + multitrc = 0; + } else if (idx) { + i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B); + if (TFCAPTUREMAX_GET(i) > 256 && + (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN)) + return -EINVAL; + } + + /* stop the tracer we'll be changing */ + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + + /* disable tracing globally if running in the wrong single/multi mode */ + cfg = t4_read_reg(adap, MPS_TRC_CFG); + if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) { + t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN); + t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + msleep(1); + if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY)) + return -ETIMEDOUT; + } + /* + * At this point either the tracing is enabled and in the right mode or + * disabled. + */ + + idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH); + data_reg = MPS_TRC_FILTER0_MATCH + idx; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + t4_write_reg(adap, data_reg, tp->data[i]); + t4_write_reg(adap, mask_reg, ~tp->mask[i]); + } + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst, + TFCAPTUREMAX(tp->snap_len) | + TFMINPKTSIZE(tp->min_len)); + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, + TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) | + TFPORT(tp->port) | TFEN | + (tp->invert ? 
TFINVERTMATCH : 0)); + + cfg &= ~TRCMULTIFILTER; + t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc); +out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + return 0; +} + +/** + * t4_get_trace_filter - query one of the tracing filters + * @adap: the adapter + * @tp: the current trace filter parameters + * @idx: which trace filter to query + * @enabled: non-zero if the filter is enabled + * + * Returns the current settings of one of the HW tracing filters. + */ +void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, + int *enabled) +{ + u32 ctla, ctlb; + int i, ofst = idx * 4; + u32 data_reg, mask_reg; + + ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst); + ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst); + + *enabled = !!(ctla & TFEN); + tp->snap_len = TFCAPTUREMAX_GET(ctlb); + tp->min_len = TFMINPKTSIZE_GET(ctlb); + tp->skip_ofst = TFOFFSET_GET(ctla); + tp->skip_len = TFLENGTH_GET(ctla); + tp->invert = !!(ctla & TFINVERTMATCH); + tp->port = TFPORT_GET(ctla); + + ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx; + data_reg = MPS_TRC_FILTER0_MATCH + ofst; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + tp->mask[i] = ~t4_read_reg(adap, mask_reg); + tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; + } +} + +/** + * get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +static unsigned int get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. 
+ */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_lb_stats - collect loopback port statistics + * @adap: the adapter + * @idx: the loopback port index + * @p: the stats structure to fill + * + * Return HW statistics for the given loopback port. 
+ */ +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->octets = GET_STAT(BYTES); + p->frames = GET_STAT(FRAMES); + p->bcast_frames = GET_STAT(BCAST); + p->mcast_frames = GET_STAT(MCAST); + p->ucast_frames = GET_STAT(UCAST); + p->error_frames = GET_STAT(ERROR); + + p->frames_64 = GET_STAT(64B); + p->frames_65_127 = GET_STAT(65B_127B); + p->frames_128_255 = GET_STAT(128B_255B); + p->frames_256_511 = GET_STAT(256B_511B); + p->frames_512_1023 = GET_STAT(512B_1023B); + p->frames_1024_1518 = GET_STAT(1024B_1518B); + p->frames_1519_max = GET_STAT(1519B_MAX); + p->drop = t4_read_reg(adap, PORT_REG(idx, + MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); + + p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; + p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; + p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; + p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; + p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; + p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; + p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; + p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_wol_magic_enable - enable/disable magic packet WoL + * @adap: the adapter + * @port: the physical port index + * @addr: MAC address expected in magic packets, %NULL to disable + * + * Enables/disables magic packet wake-on-LAN for the selected port. + */ +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr) +{ + if (addr) { + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), + (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | addr[5]); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), + (addr[0] << 8) | addr[1]); + } + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, + addr ? MAGICEN : 0); +} + +/** + * t4_wol_pat_enable - enable/disable pattern-based WoL + * @adap: the adapter + * @port: the physical port index + * @map: bitmap of which HW pattern filters to set + * @mask0: byte mask for bytes 0-63 of a packet + * @mask1: byte mask for bytes 64-127 of a packet + * @crc: Ethernet CRC for selected bytes + * @enable: enable/disable switch + * + * Sets the pattern filters indicated in @map to mask out the bytes + * specified in @mask0/@mask1 in received packets and compare the CRC of + * the resulting packet against @crc. If @enable is %true pattern-based + * WoL is enabled, otherwise disabled. 
+ */ +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable) +{ + int i; + + if (!enable) { + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), + PATEN, 0); + return 0; + } + if (map > 0xff) + return -EINVAL; + +#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) + + t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); + t4_write_reg(adap, EPIO_REG(DATA2), mask1); + t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); + + for (i = 0; i < NWOL_PAT; i++, map >>= 1) { + if (!(map & 1)) + continue; + + /* write byte masks */ + t4_write_reg(adap, EPIO_REG(DATA0), mask0); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + + /* write CRC */ + t4_write_reg(adap, EPIO_REG(DATA0), crc); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + } +#undef EPIO_REG + + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); + return 0; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).retval_len16 = htonl(FW_LEN16(var)); \ +} while (0) + +/** + * t4_mdio_rd - read a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to read + * @valp: where to store the value + * + * Issues a FW command through the given mailbox to read a PHY register. + */ +int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp) +{ + int ret; + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.cycles_to_len16 = htonl(FW_LEN16(c)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | + FW_LDST_CMD_MMD(mmd)); + c.u.mdio.raddr = htons(reg); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + *valp = ntohs(c.u.mdio.rval); + return ret; +} + +/** + * t4_mdio_wr - write a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to write + * @valp: value to write + * + * Issues a FW command through the given mailbox to write a PHY register. 
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val)
+{
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+ c.u.mdio.rval = htons(val);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_mbasyncnot = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+ FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0 && state) {
+ u32 v = ntohl(c.err_to_mbasyncnot);
+ if (v & FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else if (v & FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+ return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */ +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + int i, ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + for (i = 0; i < nparams; i++, p += 2) + *p = htonl(*params++); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = ntohl(*p); + return ret; +} + +/** + * t4_set_params - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + while (nparams--) { + *p++ = htonl(*params++); + *p++ = htonl(*val++); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_cfg_pfvf - configure PF/VF resource limits + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF being configured + * @vf: the VF being configured + * @txq: the max number of egress queues + * @txq_eth_ctrl: the max number of egress Ethernet or control queues + * @rxqi: the max number of interrupt-capable ingress queues + * @rxq: the max number of interruptless ingress queues + * @tc: the PCI traffic class + * @vi: the max number of virtual interfaces + * @cmask: the channel access rights mask for the PF/VF + * @pmask: the port access rights mask for the PF/VF + * @nexact: the maximum number of exact MPS filters + * @rcaps: read capabilities + * @wxcaps: write/execute capabilities + * + * Configures resource limits and capabilities for a physical or virtual + * function. 
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+ struct fw_pfvf_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+ c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses, they are
+ * stored consecutively so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* fall through */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* fall through */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* fall through */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ }
+ }
+ if (rss_size)
+ *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+ return ntohs(c.viid_pkd);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */ +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid) +{ + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | + FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); + c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. + */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_RXMODE_MTU_NO_CHG; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p; + + if (naddr > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | (free ? 
FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16((naddr + 2) / 2));
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+ }
+
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret)
+ return ret;
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+
+ if (idx)
+ idx[i] = index >= NEXACT_MAC ? 0xffff : index;
+ if (index < NEXACT_MAC)
+ ret++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
+ }
+ return ret;
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address.
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the one
+ * being used by the old address value and allocate a new filter for the
+ * new address value. @idx can be -1 if the address is a new addition.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ if (ret >= NEXACT_MAC)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4_set_addr_hash - program the MAC inexact-match hash filter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */ +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | + FW_VI_MAC_CMD_HASHUNIEN(ucast) | + FW_CMD_LEN16(1)); + c.u.hash.hashvec = cpu_to_be64(vec); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | + FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_identify_port - identify a VI's port by blinking its LED + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. + */ +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd c; + + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.blinkdur = htons(nblinks); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_start_stop - enable/disable an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @start: %true to enable the queues, %false to disable them + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Starts or stops an ingress queue and its associated FLs, if any. + */ +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | + FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) | + FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); + c.iqid = htons(iqid); + c.fl0id = htons(fl0id); + c.fl1id = htons(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. 
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+ FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+ FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+ FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ofld_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+ FW_EQ_OFLD_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ if (opcode == FW_PORT_CMD) { /* link/module state change message */
+ int speed = 0, fc = 0;
+ const struct fw_port_cmd *p = (void *)rpl;
+ int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+ int port = adap->chan_map[chan];
+ struct port_info *pi = adap2pinfo(adap, port);
+ struct link_config *lc = &pi->link_cfg;
+ u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4_os_link_changed(adap, port, link_ok);
+ }
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, port);
+ }
+ }
+ return 0;
+}
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+ struct pci_params *p)
+{
+ u16 val;
+ u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+ &val);
+ p->speed = val & PCI_EXP_LNKSTA_CLS;
+ p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+ }
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+static int __devinit wait_dev_ready(struct adapter *adap)
+{
+ if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+ return 0;
+ msleep(500);
+ return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+ int ret;
+
+ ret = wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+ adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port for debugging in case we can't reach FW.
+ */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + return 0; +} + +int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + + for_each_port(adap, i) { + unsigned int rss_size; + struct port_info *p = adap2pinfo(adap, i); + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | + FW_CMD_REQUEST | FW_CMD_READ | + FW_PORT_CMD_PORTID(j)); + c.action_to_len16 = htonl( + FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + if (ret < 0) + return ret; + + p->viid = ret; + p->tx_chan = j; + p->lport = j; + p->rss_size = rss_size; + memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); + memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); + + ret = ntohl(c.u.info.lstatus_to_modtype); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? + FW_PORT_CMD_MDIOADDR_GET(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); + + init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h new file mode 100644 index 00000000000..025623285c9 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.h @@ -0,0 +1,100 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +#include <linux/types.h> + +enum { + NCHAN = 4, /* # of HW channels */ + MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + NEXACT_MAC = 336, /* # of exact MAC address filters */ + L2T_SIZE = 4096, /* # of L2T entries */ + MBOX_LEN = 64, /* mailbox size in bytes */ + TRACE_LEN = 112, /* length of trace data and mask */ + FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ + NWOL_PAT = 8, /* # of WoL patterns */ + WOL_PAT_LEN = 128, /* length of WoL patterns */ +}; + +enum { + SF_PAGE_SIZE = 256, /* serial flash page size */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ + SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ +}; + +enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ + +enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + }; +}; + +#define RSPD_NEWBUF 0x80000000U +#define RSPD_LEN 0x7fffffffU + +#define RSPD_GEN(x) ((x) >> 7) +#define RSPD_TYPE(x) (((x) >> 4) & 3) + +#define QINTR_CNT_EN 0x1 +#define QINTR_TIMER_IDX(x) ((x) << 1) +#endif /* __T4_HW_H */ diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h new file mode 100644 index 00000000000..fdb11744314 --- /dev/null +++ b/drivers/net/cxgb4/t4_msg.h @@ -0,0 +1,664 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_MSG_H +#define __T4_MSG_H + +#include <linux/types.h> + +enum { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_L2T_WRITE_REQ = 0x12, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PASS_OPEN_RPL = 0x24, + CPL_ACT_OPEN_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_ABORT_REQ_RSS = 0x2B, + CPL_ABORT_RPL_RSS = 0x2D, + + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RX_DATA = 0x39, + CPL_SET_TCB_RPL = 0x3A, + CPL_RX_PKT = 0x3B, + CPL_RX_DDP_COMPLETE = 0x3F, + + CPL_ACT_ESTABLISH = 0x40, + CPL_PASS_ESTABLISH = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_PASS_ACCEPT_REQ = 0x44, + + CPL_RDMA_READ_REQ = 0x60, + + CPL_PASS_OPEN_REQ6 = 0x81, + CPL_ACT_OPEN_REQ6 = 0x83, + + CPL_RDMA_TERMINATE = 0xA2, + CPL_RDMA_WRITE = 0xA4, + CPL_SGE_EGR_UPDATE = 0xA5, + + CPL_TRACE_PKT = 0xB0, + + CPL_FW4_MSG = 0xC0, + CPL_FW4_PLD = 0xC1, + CPL_FW4_ACK = 0xC3, + + CPL_FW6_MSG = 0xE0, + CPL_FW6_PLD = 0xE1, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, + + NUM_CPL_CMDS +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_BAD_LENGTH = 15, + CPL_ERR_BAD_ROUTE = 18, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST_SYNRECV = 21, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_IWARP_FLM = 50, +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_FCOE = 6, +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCP = 0, + TX_CSUM_UDP = 1, + TX_CSUM_CRC16 = 4, + TX_CSUM_CRC32 = 5, + TX_CSUM_CRC32C = 6, + TX_CSUM_FCOE = 7, + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, + TX_CSUM_UDPIP6 = 11, + TX_CSUM_IP = 12, +}; + +union opcode_tid { + __be32 opcode_tid; + u8 opcode; +}; + +#define CPL_OPCODE(x) ((x) << 24) +#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) +#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) + +/* partitioning of TID fields that also carry a queue id */ +#define GET_TID_TID(x) ((x) & 0x3fff) +#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) +#define TID_QID(x) ((x) << 14) + +struct rss_header { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 channel:2; + u8 filter_hit:1; + u8 filter_tid:1; + u8 hash_type:2; + u8 ipv6:1; + u8 send2fw:1; +#else + u8 send2fw:1; + u8 ipv6:1; + u8 hash_type:2; + u8 filter_tid:1; + u8 filter_hit:1; + u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct 
work_request_hdr wr + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; +#define TX_CHAN(x) ((x) << 2) +#define DELACK(x) ((x) << 5) +#define ULP_MODE(x) ((x) << 8) +#define RCV_BUFSIZ(x) ((x) << 12) +#define DSCP(x) ((x) << 22) +#define SMAC_SEL(x) ((u64)(x) << 28) +#define L2T_IDX(x) ((u64)(x) << 36) +#define NAGLE(x) ((u64)(x) << 49) +#define WND_SCALE(x) ((u64)(x) << 50) +#define KEEP_ALIVE(x) ((u64)(x) << 54) +#define MSS_IDX(x) ((u64)(x) << 60) + __be64 opt1; +#define SYN_RSS_ENABLE (1 << 0) +#define SYN_RSS_QUEUE(x) ((x) << 2) +#define CONN_POLICY_ASK (1 << 22) +}; + +struct cpl_pass_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be64 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; +#define RSS_QUEUE(x) ((x) << 0) +#define RSS_QUEUE_VALID (1 << 10) +#define RX_COALESCE_VALID(x) ((x) << 11) +#define RX_COALESCE(x) ((x) << 12) +#define TX_QUEUE(x) ((x) << 23) +#define RX_CHANNEL(x) ((x) << 26) +#define WND_SCALE_EN(x) ((x) << 28) +#define TSTAMPS_EN(x) ((x) << 29) +#define SACK_EN(x) ((x) << 30) + __be64 opt0; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + __be32 atid_status; +#define GET_AOPEN_STATUS(x) ((x) & 0xff) +#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) +}; + +struct cpl_pass_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_stid; +#define GET_POPEN_TID(x) ((x) & 0xffffff) +#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) + __be16 mac_idx; + __be16 tcp_opt; +#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) +#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_act_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_atid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define QUEUENO(x) ((x) << 0) +#define REPLY_CHAN(x) ((x) << 14) +#define NO_REPLY(x) ((x) << 15) + __be16 cookie; +}; + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; +#define TCB_WORD(x) ((x) << 0) +#define TCB_COOKIE(x) ((x) << 5) + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + __be16 rsvd; + u8 cookie; + u8 status; + __be64 oldval; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listsvr_req { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define LISTSVR_IPV6 (1 << 14) + __be16 rsvd; +}; + +struct 
cpl_close_listsvr_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; +#define TXPKT_VF(x) ((x) << 0) +#define TXPKT_PF(x) ((x) << 8) +#define TXPKT_VF_VLD (1 << 11) +#define TXPKT_OVLAN_IDX(x) ((x) << 12) +#define TXPKT_INTF(x) ((x) << 16) +#define TXPKT_INS_OVLAN (1 << 21) +#define TXPKT_OPCODE(x) ((x) << 24) + __be16 pack; + __be16 len; + __be64 ctrl1; +#define TXPKT_CSUM_END(x) ((x) << 12) +#define TXPKT_CSUM_START(x) ((x) << 20) +#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) +#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) +#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) +#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) +#define TXPKT_VLAN(x) ((u64)(x) << 44) +#define TXPKT_VLAN_VLD (1ULL << 60) +#define TXPKT_IPCSUM_DIS (1ULL << 62) +#define TXPKT_L4CSUM_DIS (1ULL << 63) +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +#define cpl_tx_pkt_xt cpl_tx_pkt + +struct cpl_tx_pkt_lso { + WR_HDR; + __be32 lso_ctrl; +#define LSO_TCPHDR_LEN(x) ((x) << 0) +#define LSO_IPHDR_LEN(x) ((x) << 4) +#define LSO_ETHHDR_LEN(x) ((x) << 16) +#define LSO_IPV6(x) ((x) << 20) +#define LSO_LAST_SLICE (1 << 22) +#define LSO_FIRST_SLICE (1 << 23) +#define LSO_OPCODE(x) ((x) << 24) + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_iscsi_hdr { + union opcode_tid ot; + __be16 pdu_len_ddp; +#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) +#define ISCSI_DDP (1 << 15) + __be16 len; + __be32 seq; + __be16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 dack_mode:2; + u8 psh:1; + u8 heartbeat:1; + u8 ddp_off:1; + u8 :3; +#else + u8 :3; + u8 ddp_off:1; + u8 heartbeat:1; + u8 psh:1; + u8 dack_mode:2; +#endif + u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +#define RX_CREDITS(x) ((x) << 0) +#define RX_FORCE_ACK(x) ((x) << 28) +}; + +struct cpl_rx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_calc:1; + u8 ipmi_pkt:1; + u8 vlan_ex:1; + u8 ip_frag:1; +#else + u8 ip_frag:1; + u8 vlan_ex:1; + u8 ipmi_pkt:1; + u8 csum_calc:1; + u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; +#define RXF_UDP (1 << 22) +#define RXF_TCP (1 << 23) + __be16 hdr_len; + __be16 err_vec; +}; + +struct cpl_trace_pkt { + u8 opcode; + u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 runt:4; + u8 filter_hit:4; + u8 :6; + u8 err:1; + u8 trunc:1; +#else + u8 filter_hit:4; + u8 runt:4; + u8 trunc:1; + u8 err:1; + u8 :6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; +#define L2T_W_INFO(x) ((x) << 2) +#define L2T_W_PORT(x) ((x) << 8) +#define L2T_W_NOREPLY(x) ((x) << 15) + __be16 l2t_idx; + __be16 vlan; + u8 dst_mac[6]; +}; 
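
[Editor's note, not part of the patch: a minimal sketch of how a caller might combine the opcode/TID macros and the per-message field macros defined above to build a CPL_L2T_WRITE_REQ. The helper name example_mk_l2t_write_req and the assumption that the request buffer (including the leading work_request_hdr) is already zeroed and message-sized are hypothetical; only the struct layout and macros come from this header, and memcpy is taken from <linux/string.h>.]

/* Illustrative sketch only: fill an L2T write request for entry "l2t_idx". */
static void example_mk_l2t_write_req(struct cpl_l2t_write_req *req,
				     unsigned int tid, unsigned int l2t_idx,
				     unsigned int port, const u8 *dmac)
{
	/* opcode goes in the top byte, the (optionally TID_QID-tagged) TID below */
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, tid));
	/* bind the entry to "port"; NOREPLY(0) asks for a CPL_L2T_WRITE_RPL */
	req->params = htons(L2T_W_PORT(port) | L2T_W_NOREPLY(0));
	req->l2t_idx = htons(l2t_idx);
	req->vlan = htons(0);			/* no VLAN tag */
	memcpy(req->dst_mac, dmac, sizeof(req->dst_mac));
}
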
+ +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rdma_terminate { + union opcode_tid ot; + __be16 rsvd; + __be16 len; +}; + +struct cpl_sge_egr_update { + __be32 opcode_qid; +#define EGR_QID(x) ((x) & 0x1FFFF) + __be16 cidx; + __be16 pidx; +}; + +struct cpl_fw4_pld { + u8 opcode; + u8 rsvd0[3]; + u8 type; + u8 rsvd1; + __be16 len; + __be64 data; + __be64 rsvd2; +}; + +struct cpl_fw6_pld { + u8 opcode; + u8 rsvd[5]; + __be16 len; + __be64 data[4]; +}; + +struct cpl_fw4_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw4_ack { + union opcode_tid ot; + u8 credits; + u8 rsvd0[2]; + u8 seq_vld; + __be32 snd_nxt; + __be32 snd_una; + __be64 rsvd1; +}; + +struct cpl_fw6_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +enum { + ULP_TX_MEM_READ = 2, + ULP_TX_MEM_WRITE = 3, + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; +#define ULPTX_CMD(x) ((x) << 24) +#define ULPTX_NSGE(x) ((x) << 0) + __be32 len0; + __be64 addr0; + struct ulptx_sge_pair sge[0]; +}; + +struct ulp_mem_io { + WR_HDR; + __be32 cmd; +#define ULP_MEMIO_ORDER(x) ((x) << 23) + __be32 len16; /* command length */ + __be32 dlen; /* data length in 32-byte units */ +#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) + __be32 lock_addr; +#define ULP_MEMIO_ADDR(x) ((x) << 0) +#define ULP_MEMIO_LOCK(x) ((x) << 31) +}; + +#endif /* __T4_MSG_H */ diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h new file mode 100644 index 00000000000..5ed56483cbc --- /dev/null +++ b/drivers/net/cxgb4/t4_regs.h @@ -0,0 +1,878 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_REGS_H +#define __T4_REGS_H + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) +#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define SGE_PF_KDOORBELL 0x0 +#define QID_MASK 0xffff8000U +#define QID_SHIFT 15 +#define QID(x) ((x) << QID_SHIFT) +#define DBPRIO 0x00004000U +#define PIDX_MASK 0x00003fffU +#define PIDX_SHIFT 0 +#define PIDX(x) ((x) << PIDX_SHIFT) + +#define SGE_PF_GTS 0x4 +#define INGRESSQID_MASK 0xffff0000U +#define INGRESSQID_SHIFT 16 +#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) +#define TIMERREG_MASK 0x0000e000U +#define TIMERREG_SHIFT 13 +#define TIMERREG(x) ((x) << TIMERREG_SHIFT) +#define SEINTARM_MASK 0x00001000U +#define SEINTARM_SHIFT 12 +#define SEINTARM(x) ((x) << SEINTARM_SHIFT) +#define CIDXINC_MASK 0x00000fffU +#define CIDXINC_SHIFT 0 +#define CIDXINC(x) ((x) << CIDXINC_SHIFT) + +#define SGE_CONTROL 0x1008 +#define DCASYSTYPE 0x00080000U +#define RXPKTCPLMODE 0x00040000U +#define EGRSTATUSPAGESIZE 0x00020000U +#define PKTSHIFT_MASK 0x00001c00U +#define PKTSHIFT_SHIFT 10 +#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_MASK 0x00000380U +#define INGPCIEBOUNDARY_SHIFT 7 +#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) +#define INGPADBOUNDARY_MASK 0x00000070U +#define INGPADBOUNDARY_SHIFT 4 +#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) +#define EGRPCIEBOUNDARY_MASK 0x0000000eU +#define EGRPCIEBOUNDARY_SHIFT 1 +#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) +#define GLOBALENABLE 0x00000001U + +#define SGE_HOST_PAGE_SIZE 0x100c +#define HOSTPAGESIZEPF0_MASK 0x0000000fU +#define HOSTPAGESIZEPF0_SHIFT 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) + +#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) + +#define SGE_INT_CAUSE1 0x1024 +#define SGE_INT_CAUSE2 0x1030 +#define SGE_INT_CAUSE3 0x103c +#define ERR_FLM_DBP 0x80000000U +#define ERR_FLM_IDMA1 0x40000000U +#define ERR_FLM_IDMA0 0x20000000U +#define ERR_FLM_HINT 0x10000000U +#define ERR_PCIE_ERROR3 0x08000000U +#define ERR_PCIE_ERROR2 0x04000000U +#define ERR_PCIE_ERROR1 0x02000000U +#define ERR_PCIE_ERROR0 0x01000000U +#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U +#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U +#define ERR_INVALID_CIDX_INC 0x00200000U +#define ERR_ITP_TIME_PAUSED 0x00100000U +#define ERR_CPL_OPCODE_0 0x00080000U +#define ERR_DROPPED_DB 0x00040000U +#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U +#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U +#define ERR_BAD_DB_PIDX3 0x00008000U +#define ERR_BAD_DB_PIDX2 0x00004000U +#define ERR_BAD_DB_PIDX1 
0x00002000U +#define ERR_BAD_DB_PIDX0 0x00001000U +#define ERR_ING_PCIE_CHAN 0x00000800U +#define ERR_ING_CTXT_PRIO 0x00000400U +#define ERR_EGR_CTXT_PRIO 0x00000200U +#define DBFIFO_HP_INT 0x00000100U +#define DBFIFO_LP_INT 0x00000080U +#define REG_ADDRESS_ERR 0x00000040U +#define INGRESS_SIZE_ERR 0x00000020U +#define EGRESS_SIZE_ERR 0x00000010U +#define ERR_INV_CTXT3 0x00000008U +#define ERR_INV_CTXT2 0x00000004U +#define ERR_INV_CTXT1 0x00000002U +#define ERR_INV_CTXT0 0x00000001U + +#define SGE_INT_ENABLE3 0x1040 +#define SGE_FL_BUFFER_SIZE0 0x1044 +#define SGE_FL_BUFFER_SIZE1 0x1048 +#define SGE_INGRESS_RX_THRESHOLD 0x10a0 +#define THRESHOLD_0_MASK 0x3f000000U +#define THRESHOLD_0_SHIFT 24 +#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) +#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) +#define THRESHOLD_1_MASK 0x003f0000U +#define THRESHOLD_1_SHIFT 16 +#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) +#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) +#define THRESHOLD_2_MASK 0x00003f00U +#define THRESHOLD_2_SHIFT 8 +#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) +#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) +#define THRESHOLD_3_MASK 0x0000003fU +#define THRESHOLD_3_SHIFT 0 +#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) +#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) + +#define SGE_TIMER_VALUE_0_AND_1 0x10b8 +#define TIMERVALUE0_MASK 0xffff0000U +#define TIMERVALUE0_SHIFT 16 +#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) +#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) +#define TIMERVALUE1_MASK 0x0000ffffU +#define TIMERVALUE1_SHIFT 0 +#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) +#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) + +#define SGE_TIMER_VALUE_2_AND_3 0x10bc +#define SGE_TIMER_VALUE_4_AND_5 0x10c0 +#define SGE_DEBUG_INDEX 0x10cc +#define SGE_DEBUG_DATA_HIGH 0x10d0 +#define SGE_DEBUG_DATA_LOW 0x10d4 +#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define PCIE_PF_CLI 0x44 +#define PCIE_INT_CAUSE 0x3004 +#define UNXSPLCPLERR 0x20000000U +#define PCIEPINT 0x10000000U +#define PCIESINT 0x08000000U +#define RPLPERR 0x04000000U +#define RXWRPERR 0x02000000U +#define RXCPLPERR 0x01000000U +#define PIOTAGPERR 0x00800000U +#define MATAGPERR 0x00400000U +#define INTXCLRPERR 0x00200000U +#define FIDPERR 0x00100000U +#define CFGSNPPERR 0x00080000U +#define HRSPPERR 0x00040000U +#define HREQPERR 0x00020000U +#define HCNTPERR 0x00010000U +#define DRSPPERR 0x00008000U +#define DREQPERR 0x00004000U +#define DCNTPERR 0x00002000U +#define CRSPPERR 0x00001000U +#define CREQPERR 0x00000800U +#define CCNTPERR 0x00000400U +#define TARTAGPERR 0x00000200U +#define PIOREQPERR 0x00000100U +#define PIOCPLPERR 0x00000080U +#define MSIXDIPERR 0x00000040U +#define MSIXDATAPERR 0x00000020U +#define MSIXADDRHPERR 0x00000010U +#define MSIXADDRLPERR 0x00000008U +#define MSIDATAPERR 0x00000004U +#define MSIADDRHPERR 0x00000002U +#define MSIADDRLPERR 0x00000001U + +#define PCIE_NONFAT_ERR 0x3010 +#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 +#define PCIEOFST_MASK 0xfffffc00U +#define BIR_MASK 0x00000300U +#define BIR_SHIFT 8 +#define BIR(x) ((x) << BIR_SHIFT) +#define WINDOW_MASK 0x000000ffU +#define WINDOW_SHIFT 0 +#define WINDOW(x) ((x) << WINDOW_SHIFT) + +#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 +#define RNPP 0x80000000U +#define RPCP 0x20000000U +#define RCIP 0x08000000U +#define RCCP 0x04000000U +#define RFTP 
0x00800000U +#define PTRP 0x00100000U + +#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 +#define TPCP 0x40000000U +#define TNPP 0x20000000U +#define TFTP 0x10000000U +#define TCAP 0x08000000U +#define TCIP 0x04000000U +#define RCAP 0x02000000U +#define PLUP 0x00800000U +#define PLDN 0x00400000U +#define OTDD 0x00200000U +#define GTRP 0x00100000U +#define RDPE 0x00040000U +#define TDCE 0x00020000U +#define TDUE 0x00010000U + +#define MC_INT_CAUSE 0x7518 +#define ECC_UE_INT_CAUSE 0x00000004U +#define ECC_CE_INT_CAUSE 0x00000002U +#define PERR_INT_CAUSE 0x00000001U + +#define MC_ECC_STATUS 0x751c +#define ECC_CECNT_MASK 0xffff0000U +#define ECC_CECNT_SHIFT 16 +#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) +#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) +#define ECC_UECNT_MASK 0x0000ffffU +#define ECC_UECNT_SHIFT 0 +#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) +#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) + +#define MC_BIST_CMD 0x7600 +#define START_BIST 0x80000000U +#define BIST_CMD_GAP_MASK 0x0000ff00U +#define BIST_CMD_GAP_SHIFT 8 +#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) +#define BIST_OPCODE_MASK 0x00000003U +#define BIST_OPCODE_SHIFT 0 +#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) + +#define MC_BIST_CMD_ADDR 0x7604 +#define MC_BIST_CMD_LEN 0x7608 +#define MC_BIST_DATA_PATTERN 0x760c +#define BIST_DATA_TYPE_MASK 0x0000000fU +#define BIST_DATA_TYPE_SHIFT 0 +#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) + +#define MC_BIST_STATUS_RDATA 0x7688 + +#define MA_EXT_MEMORY_BAR 0x77c8 +#define EXT_MEM_SIZE_MASK 0x00000fffU +#define EXT_MEM_SIZE_SHIFT 0 +#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) + +#define MA_TARGET_MEM_ENABLE 0x77d8 +#define EXT_MEM_ENABLE 0x00000004U +#define EDRAM1_ENABLE 0x00000002U +#define EDRAM0_ENABLE 0x00000001U + +#define MA_INT_CAUSE 0x77e0 +#define MEM_PERR_INT_CAUSE 0x00000002U +#define MEM_WRAP_INT_CAUSE 0x00000001U + +#define MA_INT_WRAP_STATUS 0x77e4 +#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U +#define MEM_WRAP_ADDRESS_SHIFT 4 +#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) +#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU +#define MEM_WRAP_CLIENT_NUM_SHIFT 0 +#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) + +#define MA_PARITY_ERROR_STATUS 0x77f4 + +#define EDC_0_BASE_ADDR 0x7900 + +#define EDC_BIST_CMD 0x7904 +#define EDC_BIST_CMD_ADDR 0x7908 +#define EDC_BIST_CMD_LEN 0x790c +#define EDC_BIST_DATA_PATTERN 0x7910 +#define EDC_BIST_STATUS_RDATA 0x7928 +#define EDC_INT_CAUSE 0x7978 +#define ECC_UE_PAR 0x00000020U +#define ECC_CE_PAR 0x00000010U +#define PERR_PAR_CAUSE 0x00000008U + +#define EDC_ECC_STATUS 0x797c + +#define EDC_1_BASE_ADDR 0x7980 + +#define CIM_PF_MAILBOX_DATA 0x240 +#define CIM_PF_MAILBOX_CTRL 0x280 +#define MBMSGVALID 0x00000008U +#define MBINTREQ 0x00000004U +#define MBOWNER_MASK 0x00000003U +#define MBOWNER_SHIFT 0 +#define MBOWNER(x) ((x) << MBOWNER_SHIFT) +#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) + +#define CIM_PF_HOST_INT_CAUSE 0x28c +#define MBMSGRDYINT 0x00080000U + +#define CIM_HOST_INT_CAUSE 0x7b2c +#define TIEQOUTPARERRINT 0x00100000U +#define TIEQINPARERRINT 0x00080000U +#define MBHOSTPARERR 0x00040000U +#define MBUPPARERR 0x00020000U +#define IBQPARERR 0x0001f800U +#define IBQTP0PARERR 0x00010000U +#define IBQTP1PARERR 0x00008000U +#define IBQULPPARERR 0x00004000U +#define IBQSGELOPARERR 0x00002000U +#define 
IBQSGEHIPARERR 0x00001000U +#define IBQNCSIPARERR 0x00000800U +#define OBQPARERR 0x000007e0U +#define OBQULP0PARERR 0x00000400U +#define OBQULP1PARERR 0x00000200U +#define OBQULP2PARERR 0x00000100U +#define OBQULP3PARERR 0x00000080U +#define OBQSGEPARERR 0x00000040U +#define OBQNCSIPARERR 0x00000020U +#define PREFDROPINT 0x00000002U +#define UPACCNONZERO 0x00000001U + +#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 +#define EEPROMWRINT 0x40000000U +#define TIMEOUTMAINT 0x20000000U +#define TIMEOUTINT 0x10000000U +#define RSPOVRLOOKUPINT 0x08000000U +#define REQOVRLOOKUPINT 0x04000000U +#define BLKWRPLINT 0x02000000U +#define BLKRDPLINT 0x01000000U +#define SGLWRPLINT 0x00800000U +#define SGLRDPLINT 0x00400000U +#define BLKWRCTLINT 0x00200000U +#define BLKRDCTLINT 0x00100000U +#define SGLWRCTLINT 0x00080000U +#define SGLRDCTLINT 0x00040000U +#define BLKWREEPROMINT 0x00020000U +#define BLKRDEEPROMINT 0x00010000U +#define SGLWREEPROMINT 0x00008000U +#define SGLRDEEPROMINT 0x00004000U +#define BLKWRFLASHINT 0x00002000U +#define BLKRDFLASHINT 0x00001000U +#define SGLWRFLASHINT 0x00000800U +#define SGLRDFLASHINT 0x00000400U +#define BLKWRBOOTINT 0x00000200U +#define BLKRDBOOTINT 0x00000100U +#define SGLWRBOOTINT 0x00000080U +#define SGLRDBOOTINT 0x00000040U +#define ILLWRBEINT 0x00000020U +#define ILLRDBEINT 0x00000010U +#define ILLRDINT 0x00000008U +#define ILLWRINT 0x00000004U +#define ILLTRANSINT 0x00000002U +#define RSVDSPACEINT 0x00000001U + +#define TP_OUT_CONFIG 0x7d04 +#define VLANEXTENABLE_MASK 0x0000f000U +#define VLANEXTENABLE_SHIFT 12 + +#define TP_PARA_REG2 0x7d68 +#define MAXRXDATA_MASK 0xffff0000U +#define MAXRXDATA_SHIFT 16 +#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) + +#define TP_TIMER_RESOLUTION 0x7d90 +#define TIMERRESOLUTION_MASK 0x00ff0000U +#define TIMERRESOLUTION_SHIFT 16 +#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) + +#define TP_SHIFT_CNT 0x7dc0 + +#define TP_CCTRL_TABLE 0x7ddc +#define TP_MTU_TABLE 0x7de4 +#define MTUINDEX_MASK 0xff000000U +#define MTUINDEX_SHIFT 24 +#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) +#define MTUWIDTH_MASK 0x000f0000U +#define MTUWIDTH_SHIFT 16 +#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) +#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) +#define MTUVALUE_MASK 0x00003fffU +#define MTUVALUE_SHIFT 0 +#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) +#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) + +#define TP_RSS_LKP_TABLE 0x7dec +#define LKPTBLROWVLD 0x80000000U +#define LKPTBLQUEUE1_MASK 0x000ffc00U +#define LKPTBLQUEUE1_SHIFT 10 +#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE0_MASK 0x000003ffU +#define LKPTBLQUEUE0_SHIFT 0 +#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) +#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) + +#define TP_PIO_ADDR 0x7e40 +#define TP_PIO_DATA 0x7e44 +#define TP_MIB_INDEX 0x7e50 +#define TP_MIB_DATA 0x7e54 +#define TP_INT_CAUSE 0x7e74 +#define FLMTXFLSTEMPTY 0x40000000U + +#define TP_INGRESS_CONFIG 0x141 +#define VNIC 0x00000800U +#define CSUM_HAS_PSEUDO_HDR 0x00000400U +#define RM_OVLAN 0x00000200U +#define LOOKUPEVERYPKT 0x00000100U + +#define TP_MIB_MAC_IN_ERR_0 0x0 +#define TP_MIB_TCP_OUT_RST 0xc +#define TP_MIB_TCP_IN_SEG_HI 0x10 +#define TP_MIB_TCP_IN_SEG_LO 0x11 +#define TP_MIB_TCP_OUT_SEG_HI 0x12 +#define TP_MIB_TCP_OUT_SEG_LO 0x13 +#define TP_MIB_TCP_RXT_SEG_HI 0x14 +#define 
TP_MIB_TCP_RXT_SEG_LO 0x15 +#define TP_MIB_TNL_CNG_DROP_0 0x18 +#define TP_MIB_TCP_V6IN_ERR_0 0x28 +#define TP_MIB_TCP_V6OUT_RST 0x2c +#define TP_MIB_OFD_ARP_DROP 0x36 +#define TP_MIB_TNL_DROP_0 0x44 +#define TP_MIB_OFD_VLN_DROP_0 0x58 + +#define ULP_TX_INT_CAUSE 0x8dcc +#define PBL_BOUND_ERR_CH3 0x80000000U +#define PBL_BOUND_ERR_CH2 0x40000000U +#define PBL_BOUND_ERR_CH1 0x20000000U +#define PBL_BOUND_ERR_CH0 0x10000000U + +#define PM_RX_INT_CAUSE 0x8fdc +#define ZERO_E_CMD_ERROR 0x00400000U +#define PMRX_FRAMING_ERROR 0x003ffff0U +#define OCSPI_PAR_ERROR 0x00000008U +#define DB_OPTIONS_PAR_ERROR 0x00000004U +#define IESPI_PAR_ERROR 0x00000002U +#define E_PCMD_PAR_ERROR 0x00000001U + +#define PM_TX_INT_CAUSE 0x8ffc +#define PCMD_LEN_OVFL0 0x80000000U +#define PCMD_LEN_OVFL1 0x40000000U +#define PCMD_LEN_OVFL2 0x20000000U +#define ZERO_C_CMD_ERROR 0x10000000U +#define PMTX_FRAMING_ERROR 0x0ffffff0U +#define OESPI_PAR_ERROR 0x00000008U +#define ICSPI_PAR_ERROR 0x00000002U +#define C_PCMD_PAR_ERROR 0x00000001U + +#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define 
MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MPS_CMN_CTL 0x9000 +#define NUMPORTS_MASK 0x00000003U +#define NUMPORTS_SHIFT 0 +#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) + +#define MPS_INT_CAUSE 
0x9008 +#define STATINT 0x00000020U +#define TXINT 0x00000010U +#define RXINT 0x00000008U +#define TRCINT 0x00000004U +#define CLSINT 0x00000002U +#define PLINT 0x00000001U + +#define MPS_TX_INT_CAUSE 0x9408 +#define PORTERR 0x00010000U +#define FRMERR 0x00008000U +#define SECNTERR 0x00004000U +#define BUBBLE 0x00002000U +#define TXDESCFIFO 0x00001e00U +#define TXDATAFIFO 0x000001e0U +#define NCSIFIFO 0x00000010U +#define TPFIFO 0x0000000fU + +#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 +#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 +#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c + +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define MPS_TRC_CFG 0x9800 +#define TRCFIFOEMPTY 0x00000010U +#define TRCIGNOREDROPINPUT 0x00000008U +#define TRCKEEPDUPLICATES 0x00000004U +#define TRCEN 0x00000002U +#define TRCMULTIFILTER 0x00000001U + +#define MPS_TRC_RSS_CONTROL 0x9808 +#define RSSCONTROL_MASK 0x00ff0000U +#define RSSCONTROL_SHIFT 16 +#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) +#define QUEUENUMBER_MASK 0x0000ffffU +#define QUEUENUMBER_SHIFT 0 +#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 +#define TFINVERTMATCH 0x01000000U +#define TFPKTTOOLARGE 0x00800000U +#define TFEN 0x00400000U +#define TFPORT_MASK 0x003c0000U +#define TFPORT_SHIFT 18 +#define TFPORT(x) ((x) << TFPORT_SHIFT) +#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) +#define TFDROP 0x00020000U +#define TFSOPEOPERR 0x00010000U +#define TFLENGTH_MASK 0x00001f00U +#define TFLENGTH_SHIFT 8 +#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) +#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) +#define TFOFFSET_MASK 0x0000001fU +#define TFOFFSET_SHIFT 0 +#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) +#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 +#define TFMINPKTSIZE_MASK 0x01ff0000U +#define TFMINPKTSIZE_SHIFT 16 +#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) +#define 
TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) +#define TFCAPTUREMAX_MASK 0x00003fffU +#define TFCAPTUREMAX_SHIFT 0 +#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) +#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) + +#define MPS_TRC_INT_CAUSE 0x985c +#define MISCPERR 0x00000100U +#define PKTFIFO 0x000000f0U +#define FILTMEM 0x0000000fU + +#define MPS_TRC_FILTER0_MATCH 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE 0x9c80 +#define MPS_TRC_FILTER1_MATCH 0x9d00 +#define MPS_CLS_INT_CAUSE 0xd028 +#define PLERRENB 0x00000008U +#define HASHSRAM 0x00000004U +#define MATCHTCAM 0x00000002U +#define MATCHSRAM 0x00000001U + +#define MPS_RX_PERR_INT_CAUSE 0x11074 + +#define CPL_INTR_CAUSE 0x19054 +#define CIM_OP_MAP_PERR 0x00000020U +#define CIM_OVFL_ERROR 0x00000010U +#define TP_FRAMING_ERROR 0x00000008U +#define SGE_FRAMING_ERROR 0x00000004U +#define CIM_FRAMING_ERROR 0x00000002U +#define ZERO_SWITCH_ERROR 0x00000001U + +#define SMB_INT_CAUSE 0x19090 +#define MSTTXFIFOPARINT 0x00200000U +#define MSTRXFIFOPARINT 0x00100000U +#define SLVFIFOPARINT 0x00080000U + +#define ULP_RX_INT_CAUSE 0x19158 +#define ULP_RX_ISCSI_TAGMASK 0x19164 +#define ULP_RX_ISCSI_PSZ 0x19168 +#define HPZ3_MASK 0x0f000000U +#define HPZ3_SHIFT 24 +#define HPZ3(x) ((x) << HPZ3_SHIFT) +#define HPZ2_MASK 0x000f0000U +#define HPZ2_SHIFT 16 +#define HPZ2(x) ((x) << HPZ2_SHIFT) +#define HPZ1_MASK 0x00000f00U +#define HPZ1_SHIFT 8 +#define HPZ1(x) ((x) << HPZ1_SHIFT) +#define HPZ0_MASK 0x0000000fU +#define HPZ0_SHIFT 0 +#define HPZ0(x) ((x) << HPZ0_SHIFT) + +#define ULP_RX_TDDP_PSZ 0x19178 + +#define SF_DATA 0x193f8 +#define SF_OP 0x193fc +#define BUSY 0x80000000U +#define SF_LOCK 0x00000010U +#define SF_CONT 0x00000008U +#define BYTECNT_MASK 0x00000006U +#define BYTECNT_SHIFT 1 +#define BYTECNT(x) ((x) << BYTECNT_SHIFT) +#define OP_WR 0x00000001U + +#define PL_PF_INT_CAUSE 0x3c0 +#define PFSW 0x00000008U +#define PFSGE 0x00000004U +#define PFCIM 0x00000002U +#define PFMPS 0x00000001U + +#define PL_PF_INT_ENABLE 0x3c4 +#define PL_PF_CTL 0x3c8 +#define SWINT 0x00000001U + +#define PL_WHOAMI 0x19400 +#define SOURCEPF_MASK 0x00000700U +#define SOURCEPF_SHIFT 8 +#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) +#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) +#define ISVF 0x00000080U +#define VFID_MASK 0x0000007fU +#define VFID_SHIFT 0 +#define VFID(x) ((x) << VFID_SHIFT) +#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) + +#define PL_INT_CAUSE 0x1940c +#define ULP_TX 0x08000000U +#define SGE 0x04000000U +#define HMA 0x02000000U +#define CPL_SWITCH 0x01000000U +#define ULP_RX 0x00800000U +#define PM_RX 0x00400000U +#define PM_TX 0x00200000U +#define MA 0x00100000U +#define TP 0x00080000U +#define LE 0x00040000U +#define EDC1 0x00020000U +#define EDC0 0x00010000U +#define MC 0x00008000U +#define PCIE 0x00004000U +#define PMU 0x00002000U +#define XGMAC_KR1 0x00001000U +#define XGMAC_KR0 0x00000800U +#define XGMAC1 0x00000400U +#define XGMAC0 0x00000200U +#define SMB 0x00000100U +#define SF 0x00000080U +#define PL 0x00000040U +#define NCSI 0x00000020U +#define MPS 0x00000010U +#define MI 0x00000008U +#define DBG 0x00000004U +#define I2CM 0x00000002U +#define CIM 0x00000001U + +#define PL_INT_MAP0 0x19414 +#define PL_RST 0x19428 +#define PIORST 0x00000002U +#define PIORSTMODE 0x00000001U + +#define PL_PL_INT_CAUSE 0x19430 +#define FATALPERR 0x00000010U +#define PERRVFID 0x00000001U + +#define PL_REV 0x1943c + +#define LE_DB_CONFIG 0x19c04 +#define HASHEN 0x00100000U + 
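
Almost every register in t4_regs.h follows the same idiom: a multi-bit field gets a FIELD_MASK and FIELD_SHIFT, a FIELD(x) setter that shifts a value into place, and a FIELD_GET(x) extractor, while per-PF and per-port register copies are reached through the stride macros (MYPF_REG, PF_REG, PORT_REG, EDC_REG). The fragment below is an illustrative sketch only, not part of the patch: struct adapter and the t4_read_reg()/t4_write_reg() MMIO helpers are assumed to exist elsewhere in the driver and are not defined in this header.

/*
 * Illustrative sketch: how the MASK/SHIFT/GET idiom and the PF/PORT
 * stride macros above compose.  struct adapter, t4_read_reg() and
 * t4_write_reg() are assumed MMIO helpers, not definitions from this file.
 */
static u32 example_regs(struct adapter *adap, unsigned int qid,
			unsigned int pidx, int port)
{
	unsigned int pf;

	/* Ring an egress queue doorbell: pack the queue id and producer
	 * index increment into this function's SGE_PF_KDOORBELL copy. */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(qid) | PIDX(pidx));

	/* Decode which physical function we are running on. */
	pf = SOURCEPF_GET(t4_read_reg(adap, PL_WHOAMI));
	(void)pf;

	/* Per-port MPS statistics live at PORT0_BASE + port * PORT_STRIDE,
	 * so they are addressed through PORT_REG(). */
	return t4_read_reg(adap, PORT_REG(port, MPS_PORT_STAT_TX_PORT_BYTES_L));
}
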
+#define LE_DB_SERVER_INDEX 0x19c18 +#define LE_DB_ACT_CNT_IPV4 0x19c20 +#define LE_DB_ACT_CNT_IPV6 0x19c24 + +#define LE_DB_INT_CAUSE 0x19c3c +#define REQQPARERR 0x00010000U +#define UNKNOWNCMD 0x00008000U +#define PARITYERR 0x00000040U +#define LIPMISS 0x00000020U +#define LIP0 0x00000010U + +#define LE_DB_TID_HASHBASE 0x19df8 + +#define NCSI_INT_CAUSE 0x1a0d8 +#define CIM_DM_PRTY_ERR 0x00000100U +#define MPS_DM_PRTY_ERR 0x00000080U +#define TXFIFO_PRTY_ERR 0x00000002U +#define RXFIFO_PRTY_ERR 0x00000001U + +#define XGMAC_PORT_CFG2 0x1018 +#define PATEN 0x00040000U +#define MAGICEN 0x00020000U + +#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 +#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 + +#define XGMAC_PORT_EPIO_DATA0 0x10c0 +#define XGMAC_PORT_EPIO_DATA1 0x10c4 +#define XGMAC_PORT_EPIO_DATA2 0x10c8 +#define XGMAC_PORT_EPIO_DATA3 0x10cc +#define XGMAC_PORT_EPIO_OP 0x10d0 +#define EPIOWR 0x00000100U +#define ADDRESS_MASK 0x000000ffU +#define ADDRESS_SHIFT 0 +#define ADDRESS(x) ((x) << ADDRESS_SHIFT) + +#define XGMAC_PORT_INT_CAUSE 0x10dc +#endif /* __T4_REGS_H */ diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h new file mode 100644 index 00000000000..3393d05a388 --- /dev/null +++ b/drivers/net/cxgb4/t4fw_api.h @@ -0,0 +1,1580 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +#define FW_T4VF_SGE_BASE_ADDR 0x0000 +#define FW_T4VF_MPS_BASE_ADDR 0x0100 +#define FW_T4VF_PL_BASE_ADDR 0x0200 +#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 +#define FW_T4VF_CIM_BASE_ADDR 0x0300 + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_FLOWC_WR = 0x0a, + FW_OFLD_TX_DATA_WR = 0x0b, + FW_CMD_WR = 0x10, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_RI_RES_WR = 0x0c, + FW_RI_INIT_WR = 0x0d, + FW_RI_RDMA_WRITE_WR = 0x14, + FW_RI_SEND_WR = 0x15, + FW_RI_RDMA_READ_WR = 0x16, + FW_RI_RECV_WR = 0x17, + FW_RI_BIND_MW_WR = 0x18, + FW_RI_FR_NSMR_WR = 0x19, + FW_RI_INV_LSTAG_WR = 0x1a, + FW_LASTC2E_WR = 0x40 +}; + +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_WR_OP(x) ((x) << 24) +#define FW_WR_ATOMIC(x) ((x) << 23) +#define FW_WR_FLUSH(x) ((x) << 22) +#define FW_WR_COMPL(x) ((x) << 21) +#define FW_WR_IMMDLEN(x) ((x) << 0) + +#define FW_WR_EQUIQ (1U << 31) +#define FW_WR_EQUEQ (1U << 30) +#define FW_WR_FLOWID(x) ((x) << 8) +#define FW_WR_LEN16(x) ((x) << 0) + +struct fw_ulptx_wr { + __be32 op_to_compl; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_tp_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +enum fw_flowc_mnem { + FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ + FW_FLOWC_MNEM_CH, + FW_FLOWC_MNEM_PORT, + FW_FLOWC_MNEM_IQID, + FW_FLOWC_MNEM_SNDNXT, + FW_FLOWC_MNEM_RCVNXT, + FW_FLOWC_MNEM_SNDBUF, + FW_FLOWC_MNEM_MSS, +}; + +struct fw_flowc_mnemval { + u8 mnemonic; + u8 r4[3]; + __be32 val; +}; + +struct fw_flowc_wr { + __be32 op_to_nparams; +#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) + __be32 flowid_len16; + struct fw_flowc_mnemval mnemval[0]; +}; + +struct fw_ofld_tx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 tunnel_to_proxy; +#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) +#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) +#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) +#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) +#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) +#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) +#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) +}; + +struct fw_cmd_wr { + __be32 op_dma; +#define FW_CMD_WR_DMA (1U << 17) + __be32 len16_pkd; + __be64 cookie_daddr; +}; + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + u8 ethmacdst[6]; + u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +#define FW_CMD_MAX_TIMEOUT 3000 + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_MNGT_CMD = 0x11, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_EQ_OFLD_CMD = 0x21, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_ACL_MAC_CMD = 0x18, + FW_ACL_VLAN_CMD = 0x19, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_PORT_STATS_CMD = 0x1c, + FW_PORT_LB_STATS_CMD = 0x1d, + FW_PORT_TRACE_CMD = 0x1e, + FW_PORT_TRACE_MMAP_CMD = 0x1f, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_LASTC2E_CMD = 0x40, + FW_ERROR_CMD = 0x80, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PF = 0x01, + 
FW_CMD_CAP_DMAQ = 0x02, + FW_CMD_CAP_PORT = 0x04, + FW_CMD_CAP_PORTPROMISC = 0x08, + FW_CMD_CAP_PORTSTATS = 0x10, + FW_CMD_CAP_VF = 0x80, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_CMD_OP(x) ((x) << 24) +#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) +#define FW_CMD_REQUEST (1U << 23) +#define FW_CMD_READ (1U << 22) +#define FW_CMD_WRITE (1U << 21) +#define FW_CMD_EXEC (1U << 20) +#define FW_CMD_RAMASK(x) ((x) << 20) +#define FW_CMD_RETVAL(x) ((x) << 8) +#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) +#define FW_CMD_LEN16(x) ((x) << 0) + +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_FIRMWARE = 0x0001, + FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, + FW_LDST_ADDRSPC_SGE_INGC = 0x0009, + FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, + FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, + FW_LDST_ADDRSPC_TP_PIO = 0x0010, + FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, + FW_LDST_ADDRSPC_TP_MIB = 0x0012, + FW_LDST_ADDRSPC_MDIO = 0x0018, + FW_LDST_ADDRSPC_MPS = 0x0020, + FW_LDST_ADDRSPC_FUNC = 0x0028 +}; + +enum fw_ldst_mps_fid { + FW_LDST_MPS_ATRB, + FW_LDST_MPS_RPLC +}; + +enum fw_ldst_func_access_ctl { + FW_LDST_FUNC_ACC_CTL_VIID, + FW_LDST_FUNC_ACC_CTL_FID +}; + +enum fw_ldst_func_mod_index { + FW_LDST_FUNC_MPS +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; +#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_pkd; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + u8 access_ctl; + u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + } u; +}; + +#define FW_LDST_CMD_MSG(x) ((x) << 31) +#define FW_LDST_CMD_PADDR(x) ((x) << 8) +#define FW_LDST_CMD_MMD(x) ((x) << 0) +#define FW_LDST_CMD_FID(x) ((x) << 15) +#define FW_LDST_CMD_CTL(x) ((x) << 0) +#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 r3; +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_mbasyncnot; +#define FW_HELLO_CMD_ERR (1U << 31) +#define FW_HELLO_CMD_INIT (1U << 30) +#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) +#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) +#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) +#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) + __be32 fwrev; +}; + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_hm { + FW_CAPS_CONFIG_HM_PCIE = 0x00000001, + FW_CAPS_CONFIG_HM_PL = 0x00000002, + FW_CAPS_CONFIG_HM_SGE = 0x00000004, + FW_CAPS_CONFIG_HM_CIM = 0x00000008, + FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, + FW_CAPS_CONFIG_HM_TP = 0x00000020, + FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, + FW_CAPS_CONFIG_HM_PMRX = 0x00000080, + FW_CAPS_CONFIG_HM_PMTX = 0x00000100, + FW_CAPS_CONFIG_HM_MC = 0x00000200, + FW_CAPS_CONFIG_HM_LE = 0x00000400, + FW_CAPS_CONFIG_HM_MPS = 0x00000800, + FW_CAPS_CONFIG_HM_XGMAC = 
0x00001000, + FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, + FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, + FW_CAPS_CONFIG_HM_MI = 0x00008000, + FW_CAPS_CONFIG_HM_I2CM = 0x00010000, + FW_CAPS_CONFIG_HM_NCSI = 0x00020000, + FW_CAPS_CONFIG_HM_SMB = 0x00040000, + FW_CAPS_CONFIG_HM_MA = 0x00080000, + FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, + FW_CAPS_CONFIG_HM_PMU = 0x00200000, + FW_CAPS_CONFIG_HM_UART = 0x00400000, + FW_CAPS_CONFIG_HM_SF = 0x00800000, +}; + +enum fw_caps_config_nbm { + FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, + FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, +}; + +enum fw_caps_config_link { + FW_CAPS_CONFIG_LINK_PPP = 0x00000001, + FW_CAPS_CONFIG_LINK_QFC = 0x00000002, + FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, +}; + +enum fw_caps_config_switch { + FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, + FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC = 0x00000001, + FW_CAPS_CONFIG_NIC_VM = 0x00000002, +}; + +enum fw_caps_config_ofld { + FW_CAPS_CONFIG_OFLD = 0x00000001, +}; + +enum fw_caps_config_rdma { + FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, + FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, +}; + +enum fw_caps_config_iscsi { + FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, + FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, + FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, + FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, +}; + +enum fw_caps_config_fcoe { + FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, + FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 ofldcaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 r5; + __be64 r6; +}; + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_LAST +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, + FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, + FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, + FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, + FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, + FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, + FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, + FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, + FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, + FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, + FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, + FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, + FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, + FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, + FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, + FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, + FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, + FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, + 
FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, + FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, +}; + +#define FW_PARAMS_MNEM(x) ((x) << 24) +#define FW_PARAMS_PARAM_X(x) ((x) << 16) +#define FW_PARAMS_PARAM_Y(x) ((x) << 8) +#define FW_PARAMS_PARAM_Z(x) ((x) << 0) +#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) +#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define FW_PARAMS_CMD_PFN(x) ((x) << 8) +#define FW_PARAMS_CMD_VFN(x) ((x) << 0) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 cmask_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define FW_PFVF_CMD_PFN(x) ((x) << 8) +#define FW_PFVF_CMD_VFN(x) ((x) << 0) + +#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) +#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) + +#define FW_PFVF_CMD_NIQ(x) ((x) << 0) +#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_CMASK(x) ((x) << 24) +#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) + +#define FW_PFVF_CMD_PMASK(x) ((x) << 20) +#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) + +#define FW_PFVF_CMD_NEQ(x) ((x) << 0) +#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_TC(x) ((x) << 24) +#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_NVI(x) ((x) << 16) +#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) +#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) + +#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) +#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) +#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) +#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) + +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, + FW_IQ_TYPE_NO_FL_INT_CAP +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define FW_IQ_CMD_PFN(x) ((x) << 8) +#define FW_IQ_CMD_VFN(x) ((x) << 0) + +#define FW_IQ_CMD_ALLOC (1U << 31) +#define FW_IQ_CMD_FREE (1U << 30) +#define FW_IQ_CMD_MODIFY (1U << 29) +#define FW_IQ_CMD_IQSTART(x) ((x) << 28) +#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) + +#define FW_IQ_CMD_TYPE(x) ((x) << 29) +#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) +#define FW_IQ_CMD_VIID(x) ((x) << 16) +#define FW_IQ_CMD_IQANDST(x) ((x) << 15) +#define FW_IQ_CMD_IQANUS(x) ((x) << 14) +#define FW_IQ_CMD_IQANUD(x) ((x) << 12) +#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) + +#define FW_IQ_CMD_IQDROPRSS (1U << 15) +#define FW_IQ_CMD_IQGTSMODE (1U << 14) +#define 
FW_IQ_CMD_IQPCIECH(x) ((x) << 12) +#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) +#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) +#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) +#define FW_IQ_CMD_IQO (1U << 3) +#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) +#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) + +#define FW_IQ_CMD_IQNS(x) ((x) << 31) +#define FW_IQ_CMD_IQRO(x) ((x) << 30) +#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) +#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) +#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) +#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL0PADEN (1U << 2) +#define FW_IQ_CMD_FL0PACKEN (1U << 1) +#define FW_IQ_CMD_FL0CONGEN (1U << 0) + +#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) + +#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL1PADEN (1U << 2) +#define FW_IQ_CMD_FL1PACKEN (1U << 1) +#define FW_IQ_CMD_FL1CONGEN (1U << 0) + +#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 viid_pkd; + __be32 r8_lo; + __be64 r9; +}; + +#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) +#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) +#define FW_EQ_ETH_CMD_ALLOC (1U << 31) +#define FW_EQ_ETH_CMD_FREE (1U << 30) +#define FW_EQ_ETH_CMD_MODIFY (1U << 29) +#define FW_EQ_ETH_CMD_EQSTART (1U << 28) +#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) + +#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) +#define 
FW_EQ_ETH_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) +#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) +#define FW_EQ_CTRL_CMD_FREE (1U << 30) +#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) +#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) +#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) + +#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) +#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) +#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) +#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) +#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) +#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) + +struct fw_eq_ofld_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) +#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) +#define FW_EQ_OFLD_CMD_FREE (1U << 30) +#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) +#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) +#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) + +#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) + +/* + * Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ +#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) +#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) +#define 
FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 viid_pkd; + u8 mac[6]; + u8 portid_pkd; + u8 nmac; + u8 nmac0[6]; + __be16 rsssize_pkd; + u8 nmac1[6]; + __be16 r7; + u8 nmac2[6]; + __be16 r8; + u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define FW_VI_CMD_PFN(x) ((x) << 8) +#define FW_VI_CMD_VFN(x) ((x) << 0) +#define FW_VI_CMD_ALLOC (1U << 31) +#define FW_VI_CMD_FREE (1U << 30) +#define FW_VI_CMD_VIID(x) ((x) << 0) +#define FW_VI_CMD_PORTID(x) ((x) << 4) +#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_MAC_BASED_FREE 0x3FD + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_MPS_TCAM_ONLY, + FW_VI_MAC_SMT_ONLY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +enum fw_vi_mac_result { + FW_VI_MAC_R_SUCCESS, + FW_VI_MAC_R_F_NONEXISTENT_NOMEM, + FW_VI_MAC_R_SMAC_FAIL, + FW_VI_MAC_R_F_ACL_CHECK +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) +#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) +#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) +#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) +#define FW_VI_MAC_CMD_VALID (1U << 15) +#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) +#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) +#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) +#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) +#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) + +#define FW_RXMODE_MTU_NO_CHG 65535 + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_broadcasten; + __be32 r4_lo; +}; + +#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) +#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) +#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) +#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) +#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) +#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) +#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) +#define FW_VI_ENABLE_CMD_LED (1U << 29) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 +enum fw_vi_stats_vf_index { + FW_VI_VF_STAT_TX_BCAST_BYTES_IX, + FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_MCAST_BYTES_IX, + FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_UCAST_BYTES_IX, + FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_DROP_FRAMES_IX, + FW_VI_VF_STAT_TX_OFLD_BYTES_IX, + FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_VF_STAT_RX_BCAST_BYTES_IX, + FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_MCAST_BYTES_IX, + FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_UCAST_BYTES_IX, + FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_ERR_FRAMES_IX +}; + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + 
FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) +#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) +#define FW_VI_STATS_CMD_IX(x) ((x) << 0) + +struct fw_acl_mac_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nmac; + u8 r3[7]; + __be16 r4; + u8 macaddr0[6]; + __be16 r5; + u8 macaddr1[6]; + __be16 r6; + u8 macaddr2[6]; + __be16 r7; + u8 macaddr3[6]; +}; + +#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) +#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) +#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) + +struct fw_acl_vlan_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nvlan; + u8 dropnovlan_fm; + u8 r3_lo[6]; + __be16 vlanid[16]; +}; + +#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) +#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) +#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) +#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) +#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) + +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDI_0 = 0x0200, + FW_PORT_CAP_MDI_1 = 0x0400, + FW_PORT_CAP_BEAN = 0x0800, + FW_PORT_CAP_PMA_LPBK = 0x1000, + FW_PORT_CAP_PCS_LPBK = 0x2000, + FW_PORT_CAP_PHYXS_LPBK = 0x4000, + FW_PORT_CAP_FAR_END_LPBK = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_MDI_UNCHANGED, + FW_PORT_MDI_AUTO, + FW_PORT_MDI_F_STRAIGHT, + FW_PORT_MDI_F_CROSSOVER +}; + +#define FW_PORT_MDI(x) ((x) << 9) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_L2_CFG = 0x0002, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L2_PPP_CFG = 0x0004, + FW_PORT_ACTION_L2_DCB_CFG = 0x0005, + FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, + FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, + FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, + FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, + FW_PORT_ACTION_L1_LPBK = 0x0021, + 
FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, + FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, + FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, + FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, + FW_PORT_ACTION_PHY_RESET = 0x0040, + FW_PORT_ACTION_PMA_RESET = 0x0041, + FW_PORT_ACTION_PCS_RESET = 0x0042, + FW_PORT_ACTION_PHYXS_RESET = 0x0043, + FW_PORT_ACTION_DTEXS_REEST = 0x0044, + FW_PORT_ACTION_AN_RESET = 0x0045 +}; + +enum fw_port_l2cfg_ctlbf { + FW_PORT_L2_CTLBF_OVLAN0 = 0x01, + FW_PORT_L2_CTLBF_OVLAN1 = 0x02, + FW_PORT_L2_CTLBF_OVLAN2 = 0x04, + FW_PORT_L2_CTLBF_OVLAN3 = 0x08, + FW_PORT_L2_CTLBF_IVLAN = 0x10, + FW_PORT_L2_CTLBF_TXIPG = 0x20 +}; + +enum fw_port_dcb_cfg { + FW_PORT_DCB_CFG_PG = 0x01, + FW_PORT_DCB_CFG_PFC = 0x02, + FW_PORT_DCB_CFG_APPL = 0x04 +}; + +enum fw_port_dcb_cfg_rc { + FW_PORT_DCB_CFG_SUCCESS = 0x0, + FW_PORT_DCB_CFG_ERROR = 0x1 +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __be16 ctlbf_to_ivlan0; + __be16 ivlantype; + __be32 txipg_pkd; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + } info; + struct fw_port_ppp { + __be32 pppen_to_ncsich; + __be32 r11; + } ppp; + struct fw_port_dcb { + __be16 cfg; + u8 up_map; + u8 sf_cfgrc; + __be16 prot_ix; + u8 pe7_to_pe0; + u8 numTCPFCs; + __be32 pgid0_to_pgid7; + __be32 numTCs_oui; + u8 pgpc[8]; + } dcb; + } u; +}; + +#define FW_PORT_CMD_READ (1U << 22) + +#define FW_PORT_CMD_PORTID(x) ((x) << 0) +#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) + +#define FW_PORT_CMD_ACTION(x) ((x) << 16) + +#define FW_PORT_CMD_CTLBF(x) ((x) << 10) +#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) +#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) +#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) +#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) +#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) + +#define FW_PORT_CMD_TXIPG(x) ((x) << 19) + +#define FW_PORT_CMD_LSTATUS (1U << 31) +#define FW_PORT_CMD_LSPEED(x) ((x) << 24) +#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) +#define FW_PORT_CMD_TXPAUSE (1U << 23) +#define FW_PORT_CMD_RXPAUSE (1U << 22) +#define FW_PORT_CMD_MDIOCAP (1U << 21) +#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) +#define FW_PORT_CMD_LPTXPAUSE (1U << 15) +#define FW_PORT_CMD_LPRXPAUSE (1U << 14) +#define FW_PORT_CMD_PTYPE_MASK 0x1f +#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) +#define FW_PORT_CMD_MODTYPE_MASK 0x1f +#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) + +#define FW_PORT_CMD_PPPEN(x) ((x) << 31) +#define FW_PORT_CMD_TPSRC(x) ((x) << 28) +#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) + +#define FW_PORT_CMD_CH0(x) ((x) << 20) +#define FW_PORT_CMD_CH1(x) ((x) << 16) +#define FW_PORT_CMD_CH2(x) ((x) << 12) +#define FW_PORT_CMD_CH3(x) ((x) << 8) +#define FW_PORT_CMD_NCSICH(x) ((x) << 4) + +enum fw_port_type { + FW_PORT_TYPE_FIBER, + FW_PORT_TYPE_KX4, + FW_PORT_TYPE_BT_SGMII, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_BT_XAUI, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_CX4, + FW_PORT_TYPE_TWINAX, + + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK +}; + +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA, + FW_PORT_MOD_TYPE_LR, + FW_PORT_MOD_TYPE_SR, + FW_PORT_MOD_TYPE_ER, + + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 
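
The firmware commands in t4fw_api.h are all built the same way: the first big-endian word carries the opcode plus the REQUEST/READ/WRITE flags (and any command-specific id such as the port), the second carries the action and the command length in 16-byte units, and reply fields are pulled apart with the matching _GET accessors. The sketch below shows a GET_PORT_INFO query under those conventions; it is illustrative only, and t4_wr_mbox() plus struct adapter are assumed driver-side helpers that push the command through the CIM_PF_MAILBOX_* registers, not definitions from this header.

/*
 * Illustrative sketch: composing a FW_PORT_CMD / GET_PORT_INFO mailbox
 * command from the macros above and decoding its reply.  t4_wr_mbox()
 * and struct adapter are assumed helpers, not part of this header.
 */
static int example_port_info(struct adapter *adap, unsigned int mbox,
			     unsigned int port)
{
	struct fw_port_cmd c;
	u32 lstatus;
	int ret;

	memset(&c, 0, sizeof(c));
	/* flit 0: opcode, request/read flags and the port id */
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_READ | FW_PORT_CMD_PORTID(port));
	/* flit 1: the action and the command length in 16-byte units */
	c.action_to_len16 =
		htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		      FW_CMD_LEN16(sizeof(c) / 16));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);	/* assumed helper */
	if (ret)
		return ret;

	/* The reply reuses the same structure; decode the info member
	 * with the _GET accessors. */
	lstatus = ntohl(c.u.info.lstatus_to_modtype);
	if (lstatus & FW_PORT_CMD_LSTATUS)
		pr_info("port %u: link up, module type %u\n", port,
			FW_PORT_CMD_MODTYPE_GET(lstatus));
	return 0;
}
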
+#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + u8 nstats_bg_bm; + u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + __be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) +#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) + +/* port loopback stats */ +#define FW_NUM_LB_STATS 16 +enum fw_port_lb_stats_index { + FW_STAT_LB_PORT_BYTES_IX, + FW_STAT_LB_PORT_FRAMES_IX, + FW_STAT_LB_PORT_BCAST_IX, + FW_STAT_LB_PORT_MCAST_IX, + FW_STAT_LB_PORT_UCAST_IX, + FW_STAT_LB_PORT_ERROR_IX, + FW_STAT_LB_PORT_64B_IX, + FW_STAT_LB_PORT_65B_127B_IX, + FW_STAT_LB_PORT_128B_255B_IX, + FW_STAT_LB_PORT_256B_511B_IX, + 
FW_STAT_LB_PORT_512B_1023B_IX, + FW_STAT_LB_PORT_1024B_1518B_IX, + FW_STAT_LB_PORT_1519B_MAX_IX, + FW_STAT_LB_PORT_DROP_FRAMES_IX +}; + +struct fw_port_lb_stats_cmd { + __be32 op_to_lbport; + __be32 retval_len16; + union fw_port_lb_stats { + struct fw_port_lb_stats_ctl { + u8 nstats_bg_bm; + u8 ix_pkd; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_lb_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 rx_lb_drop; + __be64 rx_lb_trunc; + } all; + } u; +}; + +#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; +#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; +#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) +#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) +#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_pkd; + __be32 synmapen_to_hashtoeplitz; +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; +#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_ip4udpen; +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) +#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +enum fw_error_type { + FW_ERROR_TYPE_EXCEPTION = 0x0, + FW_ERROR_TYPE_HWMODULE = 0x1, + FW_ERROR_TYPE_WR = 0x2, + FW_ERROR_TYPE_ACL = 0x3, +}; + +struct fw_error_cmd { + __be32 op_to_type; + __be32 len16_pkd; + union fw_error { + struct fw_error_exception { + __be32 info[6]; + } exception; 
+ struct fw_error_hwmodule { + __be32 regaddr; + __be32 regval; + } hwmodule; + struct fw_error_wr { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + u8 wrhdr[16]; + } wr; + struct fw_error_acl { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + __be16 mv_pkd; + u8 val[6]; + __be64 r4; + } acl; + } u; +}; + +struct fw_debug_cmd { + __be32 op_type; +#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + u8 filename_0_7[8]; + u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +struct fw_hdr { + u8 ver; + u8 reserved1; + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; + u8 intfver_nic; + u8 intfver_vnic; + u8 intfver_ofld; + u8 intfver_ri; + u8 intfver_iscsipdu; + u8 intfver_iscsi; + u8 intfver_fcoe; + u8 reserved2; + __be32 reserved3[27]; +}; + +#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) +#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) +#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) +#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 32960b9b02a..1f9df5c6a75 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -29,10 +29,6 @@ * PHY layer usage */ -/** Pending Items in this driver: - * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions) - */ - #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> @@ -504,12 +500,6 @@ static unsigned long mdio_max_freq; /* Cache macros - Packet buffers would be from skb pool which is cached */ #define EMAC_VIRT_NOCACHE(addr) (addr) -#define EMAC_CACHE_INVALIDATE(addr, size) \ - dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE) -#define EMAC_CACHE_WRITEBACK(addr, size) \ - dma_cache_maint((void *)addr, size, DMA_TO_DEVICE) -#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \ - dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL) /* DM644x does not have BD's in cached memory - so no cache functions */ #define BD_CACHE_INVALIDATE(addr, size) @@ -962,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev) emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); } if (!netdev_mc_empty(ndev)) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; + mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); /* program multicast address list into EMAC hardware */ - netdev_for_each_mc_addr(mc_ptr, ndev) { + netdev_for_each_mc_addr(ha, ndev) { emac_add_mcast(priv, EMAC_MULTICAST_ADD, - (u8 *) mc_ptr->dmi_addr); + (u8 *) ha->addr); } } else { mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); @@ -1235,6 +1226,10 @@ static void emac_txch_teardown(struct emac_priv *priv, u32 ch) if (1 == txch->queue_active) { curr_bd = txch->active_queue_head; while (curr_bd != NULL) { + dma_unmap_single(emac_dev, curr_bd->buff_ptr, + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, + DMA_TO_DEVICE); + emac_net_tx_complete(priv, (void __force *) &curr_bd->buf_token, 1, ch); if (curr_bd != txch->active_queue_tail) @@ -1327,6 +1322,11 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget) txch->queue_active = 0; /* end of queue */ } } + + dma_unmap_single(emac_dev, 
curr_bd->buff_ptr, + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, + DMA_TO_DEVICE); + *tx_complete_ptr = (u32) curr_bd->buf_token; ++tx_complete_ptr; ++tx_complete_cnt; @@ -1387,8 +1387,8 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch) txch->bd_pool_head = curr_bd->next; curr_bd->buf_token = buf_list->buf_token; - /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */ - curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr); + curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr, + buf_list->length, DMA_TO_DEVICE); curr_bd->off_b_len = buf_list->length; curr_bd->h_next = 0; curr_bd->next = NULL; @@ -1468,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) tx_buf.length = skb->len; tx_buf.buf_token = (void *)skb; tx_buf.data_ptr = skb->data; - EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len); ndev->trans_start = jiffies; ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH); if (unlikely(ret_code != 0)) { @@ -1543,7 +1542,6 @@ static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size, p_skb->dev = ndev; skb_reserve(p_skb, NET_IP_ALIGN); *data_token = (void *) p_skb; - EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size); return p_skb->data; } @@ -1612,8 +1610,8 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param) /* populate the hardware descriptor */ curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head, priv); - /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */ - curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr); + curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr, + rxch->buf_size, DMA_FROM_DEVICE); curr_bd->off_b_len = rxch->buf_size; curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; @@ -1697,6 +1695,12 @@ static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch) curr_bd = rxch->active_queue_head; while (curr_bd) { if (curr_bd->buf_token) { + dma_unmap_single(&priv->ndev->dev, + curr_bd->buff_ptr, + curr_bd->off_b_len + & EMAC_RX_BD_BUF_SIZE, + DMA_FROM_DEVICE); + dev_kfree_skb_any((struct sk_buff *)\ curr_bd->buf_token); } @@ -1871,8 +1875,8 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch, /* populate the hardware descriptor */ curr_bd->h_next = 0; - /* FIXME buff_ptr = dma_map_single(... buffer ...) 
*/ - curr_bd->buff_ptr = virt_to_phys(buffer); + curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer, + rxch->buf_size, DMA_FROM_DEVICE); curr_bd->off_b_len = rxch->buf_size; curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; curr_bd->next = NULL; @@ -1927,7 +1931,6 @@ static int emac_net_rx_cb(struct emac_priv *priv, p_skb = (struct sk_buff *)net_pkt_list->pkt_token; /* set length of packet */ skb_put(p_skb, net_pkt_list->pkt_length); - EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len); p_skb->protocol = eth_type_trans(p_skb, priv->ndev); netif_receive_skb(p_skb); priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length; @@ -1990,6 +1993,11 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget) rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr; rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE; rx_buf_obj->buf_token = curr_bd->buf_token; + + dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr, + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, + DMA_FROM_DEVICE); + curr_pkt->pkt_token = curr_pkt->buf_list->buf_token; curr_pkt->num_bufs = 1; curr_pkt->pkt_length = @@ -2658,7 +2666,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) pdata = pdev->dev.platform_data; if (!pdata) { - printk(KERN_ERR "DaVinci EMAC: No platfrom data\n"); + printk(KERN_ERR "DaVinci EMAC: No platform data\n"); return -ENODEV; } @@ -2820,31 +2828,37 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev) return 0; } -static -int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state) +static int davinci_emac_suspend(struct device *dev) { - struct net_device *dev = platform_get_drvdata(pdev); + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); - if (netif_running(dev)) - emac_dev_stop(dev); + if (netif_running(ndev)) + emac_dev_stop(ndev); clk_disable(emac_clk); return 0; } -static int davinci_emac_resume(struct platform_device *pdev) +static int davinci_emac_resume(struct device *dev) { - struct net_device *dev = platform_get_drvdata(pdev); + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); clk_enable(emac_clk); - if (netif_running(dev)) - emac_dev_open(dev); + if (netif_running(ndev)) + emac_dev_open(ndev); return 0; } +static const struct dev_pm_ops davinci_emac_pm_ops = { + .suspend = davinci_emac_suspend, + .resume = davinci_emac_resume, +}; + /** * davinci_emac_driver: EMAC platform driver structure */ @@ -2852,11 +2866,10 @@ static struct platform_driver davinci_emac_driver = { .driver = { .name = "davinci_emac", .owner = THIS_MODULE, + .pm = &davinci_emac_pm_ops, }, .probe = davinci_emac_probe, .remove = __devexit_p(davinci_emac_remove), - .suspend = davinci_emac_suspend, - .resume = davinci_emac_resume, }; /** diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 8cf3cc6f20e..fb3f0984c28 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -940,7 +940,7 @@ static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile u16 *ib = (volatile u16 *)dev->mem_start; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -959,8 +959,8 @@ static void lance_load_multicast(struct net_device *dev) *lib_ptr(ib, filter[3], lp->type) = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index ed53a8d45f4..e5667c55844 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c @@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev) { DFX_board_t *bp = netdev_priv(dev); int i; /* used as index in for loop */ - struct dev_mc_list *dmi; /* ptr to multicast addr entry */ + struct netdev_hw_addr *ha; /* Enable LLC frame promiscuous mode, if necessary */ @@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev) /* Copy addresses to multicast address table, then update adapter CAM */ i = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], - dmi->dmi_addr, FDDI_K_ALEN); + ha->addr, FDDI_K_ALEN); if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) { diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 744c1928dfc..a88300a0d1e 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev) static void SetMulticastFilter(struct net_device *dev) { struct depca_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i, j, bit, byte; u16 hashcode; @@ -1287,8 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev) lp->init_block.mcast_table[i] = 0; } /* Add multicast addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? */ crc = ether_crc(ETH_ALEN, addrs); hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index b05bad82982..6579225dbd9 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -1132,14 +1132,14 @@ set_multicast (struct net_device *dev) /* Receive broadcast and multicast frames */ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* Receive broadcast frames and multicast frames filtering by Hashtable */ rx_mode = ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { int bit, index = 0; - int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); + int crc = ether_crc_le(ETH_ALEN, ha->addr); /* The inverted high significant 6 bits of CRC are used as an index to hashtable */ for (bit = 0; bit < 6; bit++) diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 1c67f1138ca..a818ea998bb 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -33,6 +33,7 @@ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/irq.h> +#include <linux/slab.h> #include <asm/delay.h> #include <asm/irq.h> @@ -724,7 +725,7 @@ static void dm9000_hash_table(struct net_device *dev) { board_info_t *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; int i, oft; u32 hash_val; u16 hash_table[4]; @@ -752,8 +753,8 @@ dm9000_hash_table(struct net_device *dev) rcr |= RCR_ALL; /* the multicast address in Hash Table : 64 bits */ - netdev_for_each_mc_addr(mcptr, dev) { - hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + hash_val = ether_crc_le(6, ha->addr) & 0x3f; hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 234685213f1..d51a83e6958 100644 --- 
a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -918,7 +918,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", bp->regs, mem_base, dev->irq, dev->dev_addr); - dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n", + dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", diff --git a/drivers/net/e100.c b/drivers/net/e100.c index a26ccab057d..3e8d0005540 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -147,6 +147,8 @@ * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> @@ -174,7 +176,6 @@ #define DRV_VERSION "3.5.24-k2"DRV_EXT #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" -#define PFX DRV_NAME ": " #define E100_WATCHDOG_PERIOD (2 * HZ) #define E100_NAPI_WEIGHT 16 @@ -200,10 +201,6 @@ module_param(use_io, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); -#define DPRINTK(nlevel, klevel, fmt, args...) \ - (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ - printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ - __func__ , ## args)) #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ @@ -689,12 +686,13 @@ static int e100_self_test(struct nic *nic) /* Check results of self-test */ if (nic->mem->selftest.result != 0) { - DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", - nic->mem->selftest.result); + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); return -ETIMEDOUT; } if (nic->mem->selftest.signature == 0) { - DPRINTK(HW, ERR, "Self-test failed: timed out\n"); + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); return -ETIMEDOUT; } @@ -797,7 +795,7 @@ static int e100_eeprom_load(struct nic *nic) /* The checksum, stored in the last word, is calculated such that * the sum of words should be 0xBABA */ if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { - DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); if (!eeprom_bad_csum_allow) return -EAGAIN; } @@ -953,8 +951,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) udelay(20); } if (unlikely(!i)) { - printk("e100.mdio_ctrl(%s) won't go Ready\n", - nic->netdev->name ); + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); spin_unlock_irqrestore(&nic->mdio_lock, flags); return 0; /* No way to indicate timeout error */ } @@ -966,9 +963,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) break; } spin_unlock_irqrestore(&nic->mdio_lock, flags); - DPRINTK(HW, DEBUG, - "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", - dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? 
"READ" : "WRITE", + addr, reg, data, data_out); return (u16)data_out; } @@ -1028,17 +1026,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, return ADVERTISE_10HALF | ADVERTISE_10FULL; default: - DPRINTK(HW, DEBUG, - "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", - dir == mdi_read ? "READ" : "WRITE", addr, reg, data); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); return 0xFFFF; } } else { switch (reg) { default: - DPRINTK(HW, DEBUG, - "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", - dir == mdi_read ? "READ" : "WRITE", addr, reg, data); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); return 0xFFFF; } } @@ -1155,12 +1155,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) } } - DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); - DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); - DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", + c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", + c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", + c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); } /************************************************************************* @@ -1253,16 +1256,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic) err = request_firmware(&fw, fw_name, &nic->pdev->dev); if (err) { - DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", - fw_name, err); + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); return ERR_PTR(err); } /* Firmware should be precisely UCODE_SIZE (words) plus three bytes indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ if (fw->size != UCODE_SIZE * 4 + 3) { - DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n", - fw_name, fw->size); + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); release_firmware(fw); return ERR_PTR(-EINVAL); } @@ -1274,9 +1279,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic) if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || min_size >= UCODE_SIZE) { - DPRINTK(PROBE, ERR, - "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", - fw_name, timer, bundle, min_size); + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); release_firmware(fw); return ERR_PTR(-EINVAL); } @@ -1328,7 +1333,8 @@ static inline int e100_load_ucode_wait(struct nic *nic) return PTR_ERR(fw); if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) - DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err); + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); /* must restart cuc */ nic->cuc_cmd = cuc_start; @@ -1348,7 +1354,7 @@ static 
inline int e100_load_ucode_wait(struct nic *nic) /* if the command failed, or is not OK, notify and return */ if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { - DPRINTK(PROBE,ERR, "ucode load failed\n"); + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); err = -EPERM; } @@ -1386,8 +1392,8 @@ static int e100_phy_check_without_mii(struct nic *nic) * media is sensed automatically based on how the link partner * is configured. This is, in essence, manual configuration. */ - DPRINTK(PROBE, INFO, - "found MII-less i82503 or 80c24 or other PHY\n"); + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ @@ -1434,18 +1440,20 @@ static int e100_phy_init(struct nic *nic) return 0; /* simply return and hope for the best */ else { /* for unknown cases log a fatal error */ - DPRINTK(HW, ERR, - "Failed to locate any known PHY, aborting.\n"); + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); return -EAGAIN; } } else - DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); /* Get phy ID */ id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); nic->phy = (u32)id_hi << 16 | (u32)id_lo; - DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); /* Select the phy and isolate the rest */ for (addr = 0; addr < 32; addr++) { @@ -1507,7 +1515,7 @@ static int e100_hw_init(struct nic *nic) e100_hw_reset(nic); - DPRINTK(HW, ERR, "e100_hw_init\n"); + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); if (!in_interrupt() && (err = e100_self_test(nic))) return err; @@ -1537,16 +1545,16 @@ static int e100_hw_init(struct nic *nic) static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct net_device *netdev = nic->netdev; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); cb->command = cpu_to_le16(cb_multi); cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); i = 0; - netdev_for_each_mc_addr(list, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == count) break; - memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr, + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, ETH_ALEN); } } @@ -1555,8 +1563,9 @@ static void e100_set_multicast_list(struct net_device *netdev) { struct nic *nic = netdev_priv(netdev); - DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n", - netdev_mc_count(netdev), netdev->flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); if (netdev->flags & IFF_PROMISC) nic->flags |= promiscuous; @@ -1629,7 +1638,8 @@ static void e100_update_stats(struct nic *nic) if (e100_exec_cmd(nic, cuc_dump_reset, 0)) - DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); } static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) @@ -1659,20 +1669,19 @@ static void e100_watchdog(unsigned long data) struct nic *nic = (struct nic *)data; struct ethtool_cmd cmd; - DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies); + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); /* mii library handles 
link maintenance tasks */ mii_ethtool_gset(&nic->mii, &cmd); if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { - printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n", - nic->netdev->name, - cmd.speed == SPEED_100 ? "100" : "10", - cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + cmd.speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { - printk(KERN_INFO "e100: %s NIC Link is Down\n", - nic->netdev->name); + netdev_info(nic->netdev, "NIC Link is Down\n"); } mii_check_link(&nic->mii); @@ -1732,7 +1741,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, Issue a NOP command followed by a 1us delay before issuing the Tx command. */ if (e100_exec_cmd(nic, cuc_nop, 0)) - DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n"); + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); udelay(1); } @@ -1741,12 +1751,14 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, switch (err) { case -ENOSPC: /* We queued the skb, but now we're out of space. */ - DPRINTK(TX_ERR, DEBUG, "No space for CB\n"); + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); netif_stop_queue(netdev); break; case -ENOMEM: /* This is a hard error - log it. */ - DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n"); + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); netif_stop_queue(netdev); return NETDEV_TX_BUSY; } @@ -1767,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic) for (cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete); cb = nic->cb_to_clean = cb->next) { - DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n", - (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), - cb->status); + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); if (likely(cb->skb != NULL)) { dev->stats.tx_packets++; @@ -1912,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); rfd_status = le16_to_cpu(rfd->status); - DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); /* If data isn't ready, nothing to indicate */ if (unlikely(!(rfd_status & cb_complete))) { @@ -2123,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id) struct nic *nic = netdev_priv(netdev); u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); - DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack); + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ stat_ack == stat_ack_not_present) /* Hardware is ejected */ @@ -2263,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work) struct nic *nic = container_of(work, struct nic, tx_timeout_task); struct net_device *netdev = nic->netdev; - DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", - ioread8(&nic->csr->scb.status)); + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); e100_down(netdev_priv(netdev)); e100_up(netdev_priv(netdev)); } @@ -2526,8 +2541,8 @@ static int e100_set_ringparam(struct net_device *netdev, rfds->count = min(rfds->count, rfds->max); cbs->count = max(ring->tx_pending, cbs->min); 
cbs->count = min(cbs->count, cbs->max); - DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n", - rfds->count, cbs->count); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); if (netif_running(netdev)) e100_up(nic); @@ -2704,7 +2719,7 @@ static int e100_open(struct net_device *netdev) netif_carrier_off(netdev); if ((err = e100_up(nic))) - DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n"); + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); return err; } @@ -2738,7 +2753,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { if (((1 << debug) - 1) & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n"); + pr_err("Etherdev alloc failed, aborting\n"); return -ENOMEM; } @@ -2756,35 +2771,34 @@ static int __devinit e100_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, netdev); if ((err = pci_enable_device(pdev))) { - DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); goto err_out_free_dev; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - DPRINTK(PROBE, ERR, "Cannot find proper PCI device " - "base address, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } if ((err = pci_request_regions(pdev, DRV_NAME))) { - DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { - DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n"); + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); goto err_out_free_res; } SET_NETDEV_DEV(netdev, &pdev->dev); if (use_io) - DPRINTK(PROBE, INFO, "using i/o access mode\n"); + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); if (!nic->csr) { - DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_free_res; } @@ -2818,7 +2832,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); if ((err = e100_alloc(nic))) { - DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); goto err_out_iounmap; } @@ -2831,13 +2845,11 @@ static int __devinit e100_probe(struct pci_dev *pdev, memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); if (!is_valid_ether_addr(netdev->perm_addr)) { if (!eeprom_bad_csum_allow) { - DPRINTK(PROBE, ERR, "Invalid MAC address from " - "EEPROM, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); err = -EAGAIN; goto err_out_free; } else { - DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, " - "you MUST configure one.\n"); + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); } } @@ -2853,17 +2865,18 @@ static int __devinit e100_probe(struct pci_dev *pdev, strcpy(netdev->name, "eth%d"); if ((err = register_netdev(netdev))) { - DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); goto err_out_free; } nic->cbs_pool = pci_pool_create(netdev->name, nic->pdev, - nic->params.cbs.count * sizeof(struct cb), + nic->params.cbs.max * sizeof(struct cb), sizeof(u32), 0); - DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", - (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), - pdev->irq, netdev->dev_addr); + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); return 0; @@ -3021,7 +3034,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) struct nic *nic = netdev_priv(netdev); if (pci_enable_device(pdev)) { - printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n"); + pr_err("Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); @@ -3080,8 +3093,8 @@ static struct pci_driver e100_driver = { static int __init e100_init_module(void) { if (((1 << debug) - 1) & NETIF_MSG_DRV) { - printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); - printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT); + pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); } return pci_register_driver(&e100_driver); } diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 9902b33b716..2f29c213185 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -261,7 +261,6 @@ struct e1000_adapter { /* TX */ struct e1000_tx_ring *tx_ring; /* One per active queue */ unsigned int restart_queue; - unsigned long tx_queue_len; u32 txd_cmd; u32 tx_int_delay; u32 tx_abs_int_delay; diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8d7d87f1282..e2b6e6e7ba6 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -990,7 +990,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) DEBUGOUT("Error, did not detect valid phy.\n"); return ret_val; } - DEBUGOUT1("Phy ID = %x \n", hw->phy_id); + DEBUGOUT1("Phy ID = %x\n", hw->phy_id); /* Set PHY to class A mode (if necessary) */ ret_val = e1000_set_phy_mode(hw); @@ -1680,7 +1680,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) if (ret_val) return ret_val; - DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); + DEBUGOUT1("M88E1000 PSCR: %x\n", phy_data); /* Need to reset the PHY or these changes will be ignored */ mii_ctrl_reg |= MII_CR_RESET; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8be6faee43e..974a02d8182 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter) adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); } - - adapter->tx_queue_len = netdev->tx_queue_len; } int e1000_up(struct e1000_adapter *adapter) @@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; adapter->link_speed = 0; adapter->link_duplex = 0; netif_carrier_off(netdev); @@ -2101,7 +2098,6 @@ static void e1000_set_rx_mode(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; bool use_uc = false; - struct dev_addr_list *mc_ptr; u32 rctl; u32 hash_value; int i, rar_entries = E1000_RAR_ENTRIES; @@ -2161,17 +2157,17 @@ static void e1000_set_rx_mode(struct net_device *netdev) WARN_ON(i == rar_entries); - netdev_for_each_mc_addr(mc_ptr, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == rar_entries) { /* load any remaining addresses into the hash table */ u32 hash_reg, hash_bit, mta; - hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); + hash_value = e1000_hash_mc_addr(hw, ha->addr); hash_reg = (hash_value >> 5) & 0x7F; hash_bit = hash_value & 0x1F; mta = (1 << hash_bit); mcarray[hash_reg] |= mta; } else { - e1000_rar_set(hw, mc_ptr->da_addr, i++); + e1000_rar_set(hw, ha->addr, i++); } } @@ -2316,19 
+2312,15 @@ static void e1000_watchdog(unsigned long data) E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); - /* tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = false; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = false; - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } @@ -2937,7 +2929,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned int len = skb->len - skb->data_len; + unsigned int len = skb_headlen(skb); unsigned int nr_frags; unsigned int mss; int count = 0; @@ -2988,7 +2980,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - len = skb->len - skb->data_len; + len = skb_headlen(skb); break; default: /* do nothing */ diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 3c95acb3a87..4b0016d6953 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -323,7 +323,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) } /* - * Initialze device specific counter of SMBI acquisition + * Initialize device specific counter of SMBI acquisition * timeouts. */ hw->dev_spec.e82571.smb_counter = 0; @@ -1346,7 +1346,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) * * 1) down * 2) autoneg_progress - * 3) autoneg_complete (the link sucessfully autonegotiated) + * 3) autoneg_complete (the link successfully autonegotiated) * 4) forced_up (the link has been forced up, it did not autonegotiate) * **/ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index c2ec095d216..12648a1cdb7 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -42,25 +42,16 @@ struct e1000_info; -#define e_printk(level, adapter, format, arg...) \ - printk(level "%s: %s: " format, pci_name(adapter->pdev), \ - adapter->netdev->name, ## arg) - -#ifdef DEBUG #define e_dbg(format, arg...) \ - e_printk(KERN_DEBUG , hw->adapter, format, ## arg) -#else -#define e_dbg(format, arg...) do { (void)(hw); } while (0) -#endif - + netdev_dbg(hw->adapter->netdev, format, ## arg) #define e_err(format, arg...) \ - e_printk(KERN_ERR, adapter, format, ## arg) + netdev_err(adapter->netdev, format, ## arg) #define e_info(format, arg...) \ - e_printk(KERN_INFO, adapter, format, ## arg) + netdev_info(adapter->netdev, format, ## arg) #define e_warn(format, arg...) \ - e_printk(KERN_WARNING, adapter, format, ## arg) + netdev_warn(adapter->netdev, format, ## arg) #define e_notice(format, arg...) \ - e_printk(KERN_NOTICE, adapter, format, ## arg) + netdev_notice(adapter->netdev, format, ## arg) /* Interrupt modes, as used by the IntMode parameter */ @@ -158,6 +149,9 @@ struct e1000_info; #define HV_M_STATUS_SPEED_1000 0x0200 #define HV_M_STATUS_LINK_UP 0x0040 +/* Time to wait before putting the device into D3 if there's no link (in ms). 
*/ +#define LINK_TIMEOUT 100 + enum e1000_boards { board_82571, board_82572, @@ -279,7 +273,6 @@ struct e1000_adapter { struct napi_struct napi; - unsigned long tx_queue_len; unsigned int restart_queue; u32 txd_cmd; @@ -370,6 +363,8 @@ struct e1000_adapter { struct work_struct update_phy_task; struct work_struct led_blink_task; struct work_struct print_hang_task; + + bool idle_check; }; struct e1000_info { diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index b33e3cbe9ab..983493f2330 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -31,6 +31,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/delay.h> #include "e1000.h" diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 8b5e157e9c8..5059c22155d 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -1622,7 +1622,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Check if the flash descriptor is valid */ if (hsfsts.hsf_status.fldesvalid == 0) { e_dbg("Flash descriptor invalid. " - "SW Sequencing must be used."); + "SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } @@ -1671,7 +1671,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) hsfsts.hsf_status.flcdone = 1; ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { - e_dbg("Flash controller busy, cannot get access"); + e_dbg("Flash controller busy, cannot get access\n"); } } @@ -1822,7 +1822,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, continue; } else if (hsfsts.hsf_status.flcdone == 0) { e_dbg("Timeout error - flash cycle " - "did not complete."); + "did not complete.\n"); break; } } diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 2425ed11d5c..b0d2a60aa49 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -647,7 +647,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; e_dbg("SERDES: Link up - autoneg " - "completed sucessfully.\n"); + "completed successfully.\n"); } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - invalid" @@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup u32 status; status = er32(STATUS); - if (status & E1000_STATUS_SPEED_1000) { + if (status & E1000_STATUS_SPEED_1000) *speed = SPEED_1000; - e_dbg("1000 Mbs, "); - } else if (status & E1000_STATUS_SPEED_100) { + else if (status & E1000_STATUS_SPEED_100) *speed = SPEED_100; - e_dbg("100 Mbs, "); - } else { + else *speed = SPEED_10; - e_dbg("10 Mbs, "); - } - if (status & E1000_STATUS_FD) { + if (status & E1000_STATUS_FD) *duplex = FULL_DUPLEX; - e_dbg("Full Duplex\n"); - } else { + else *duplex = HALF_DUPLEX; - e_dbg("Half Duplex\n"); - } + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? 
"Full" : "Half"); return 0; } diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 88d54d3efce..5f70c437fa4 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -26,6 +26,8 @@ *******************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> @@ -36,6 +38,7 @@ #include <linux/netdevice.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/mii.h> @@ -44,6 +47,7 @@ #include <linux/cpu.h> #include <linux/smp.h> #include <linux/pm_qos_params.h> +#include <linux/pm_runtime.h> #include <linux/aer.h> #include "e1000.h" @@ -660,6 +664,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) i = 0; } + if (i == tx_ring->next_to_use) + break; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } @@ -2289,8 +2295,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); e1000e_config_collision_dist(hw); - - adapter->tx_queue_len = adapter->netdev->tx_queue_len; } /** @@ -2564,7 +2568,7 @@ static void e1000_set_multi(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list; u32 rctl; int i; @@ -2596,9 +2600,8 @@ static void e1000_set_multi(struct net_device *netdev) /* prepare a packed array of only addresses. */ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), - mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); e1000_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); @@ -2877,7 +2880,6 @@ void e1000e_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3083,12 +3085,15 @@ static int e1000_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; int err; /* disallow open during test */ if (test_bit(__E1000_TESTING, &adapter->state)) return -EBUSY; + pm_runtime_get_sync(&pdev->dev); + netif_carrier_off(netdev); /* allocate transmit descriptors */ @@ -3149,6 +3154,9 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); + adapter->idle_check = true; + pm_runtime_put(&pdev->dev); + /* fire a link status change interrupt to start the watchdog */ ew32(ICS, E1000_ICS_LSC); @@ -3162,6 +3170,7 @@ err_setup_rx: e1000e_free_tx_resources(adapter); err_setup_tx: e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); return err; } @@ -3180,11 +3189,17 @@ err_setup_tx: static int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); - e1000e_down(adapter); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter); + e1000_free_irq(adapter); + } e1000_power_down_phy(adapter); - e1000_free_irq(adapter); e1000e_free_tx_resources(adapter); e1000e_free_rx_resources(adapter); @@ -3206,6 +3221,8 @@ static int 
e1000_close(struct net_device *netdev) if (adapter->flags & FLAG_HAS_AMT) e1000_release_hw_control(adapter); + pm_runtime_put_sync(&pdev->dev); + return 0; } /** @@ -3550,6 +3567,9 @@ static void e1000_watchdog_task(struct work_struct *work) link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + e1000e_enable_receives(adapter); goto link_up; } @@ -3561,6 +3581,10 @@ static void e1000_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { bool txb2b = 1; + + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, @@ -3588,21 +3612,15 @@ static void e1000_watchdog_task(struct work_struct *work) "link gets many collisions.\n"); } - /* - * tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor - */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = 0; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = 0; - netdev->tx_queue_len = 100; adapter->tx_timeout_factor = 10; break; } @@ -3676,6 +3694,9 @@ static void e1000_watchdog_task(struct work_struct *work) if (adapter->flags & FLAG_RX_NEEDS_RESTART) schedule_work(&adapter->reset_task); + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); } } @@ -4111,7 +4132,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, unsigned int max_per_txd = E1000_MAX_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned int len = skb->len - skb->data_len; + unsigned int len = skb_headlen(skb); unsigned int nr_frags; unsigned int mss; int count = 0; @@ -4161,7 +4182,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - len = skb->len - skb->data_len; + len = skb_headlen(skb); } } @@ -4473,13 +4494,15 @@ out: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 ctrl, ctrl_ext, rctl, status; - u32 wufc = adapter->wol; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; int retval = 0; netif_device_detach(netdev); @@ -4636,43 +4659,21 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev) } } -#ifdef CONFIG_PM -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef CONFIG_PM_OPS +static bool e1000e_pm_ready(struct e1000_adapter *adapter) { - int retval; - bool wake; - - retval = __e1000_shutdown(pdev, &wake); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); - - return retval; + return !!adapter->tx_ring->buffer_info; } -static int e1000_resume(struct pci_dev *pdev) +static int __e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); e1000e_disable_l1aspm(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, - "Cannot enable PCI device from suspend\n"); - return err; - } - - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { err = e1000_request_irq(adapter); @@ -4730,13 +4731,88 @@ static int e1000_resume(struct pci_dev *pdev) return 0; } -#endif + +#ifdef CONFIG_PM_SLEEP +static int e1000_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake, false); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); + + return retval; +} + +static int e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) + adapter->idle_check = true; + + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int e1000_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) { + bool wake; + + __e1000_shutdown(pdev, &wake, true); + } + + return 0; +} + +static int e1000_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + if (adapter->idle_check) { + adapter->idle_check = false; + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + adapter->idle_check = !dev->power.runtime_auto; + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM_OPS */ static void e1000_shutdown(struct pci_dev *pdev) { bool wake = false; - __e1000_shutdown(pdev, &wake); + __e1000_shutdown(pdev, &wake, false); if (system_state == SYSTEM_POWER_OFF) e1000_complete_shutdown(pdev, false, wake); @@ -4809,8 +4885,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); + pdev->state_saved = true; pci_restore_state(pdev); - pci_save_state(pdev); 
pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -5217,6 +5293,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev, e1000_print_device_info(adapter); + if (pci_dev_run_wake(pdev)) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } + pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC); + return 0; err_register: @@ -5259,12 +5341,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + pm_runtime_get_sync(&pdev->dev); /* * flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ - set_bit(__E1000_DOWN, &adapter->state); + if (!down) + set_bit(__E1000_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -5278,8 +5364,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev) if (!(netdev->flags & IFF_UP)) e1000_power_down_phy(adapter); + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); unregister_netdev(netdev); + if (pci_dev_run_wake(pdev)) { + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + } + pm_runtime_put_noidle(&pdev->dev); + /* * Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. @@ -5379,16 +5474,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); +#ifdef CONFIG_PM_OPS +static const struct dev_pm_ops e1000_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, + e1000_runtime_resume, e1000_idle) +}; +#endif + /* PCI Device API Driver */ static struct pci_driver e1000_driver = { .name = e1000e_driver_name, .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = __devexit_p(e1000_remove), -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = e1000_suspend, - .resume = e1000_resume, +#ifdef CONFIG_PM_OPS + .driver.pm = &e1000_pm_ops, #endif .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler @@ -5403,10 +5504,9 @@ static struct pci_driver e1000_driver = { static int __init e1000_init_module(void) { int ret; - printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", - e1000e_driver_name, e1000e_driver_version); - printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", - e1000e_driver_name); + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index 2e399778cae..f775a481063 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } { /* Transmit Interrupt Delay */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = range_option, .name = "Transmit Interrupt Delay", .err = "using default of " @@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Transmit Absolute Interrupt Delay */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = range_option, .name = "Transmit Absolute Interrupt Delay", .err = "using default of " @@ -305,7 +305,7 @@ void 
__devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Receive Absolute Interrupt Delay */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = range_option, .name = "Receive Absolute Interrupt Delay", .err = "using default of " @@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Interrupt Throttling Rate */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = range_option, .name = "Interrupt Throttling Rate (ints/sec)", .err = "using default of " @@ -399,7 +399,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Smart Power Down */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = enable_option, .name = "PHY Smart Power Down", .err = "defaulting to Disabled", @@ -415,7 +415,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* CRC Stripping */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = enable_option, .name = "CRC Stripping", .err = "defaulting to enabled", @@ -432,7 +432,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Kumeran Lock Loss Workaround */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = enable_option, .name = "Kumeran Lock Loss Workaround", .err = "defaulting to Enabled", @@ -452,7 +452,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } } { /* Write-protect NVM */ - const struct e1000_option opt = { + static const struct e1000_option opt = { .type = enable_option, .name = "Write-protect NVM", .err = "defaulting to Enabled", diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 1b05bdf62c3..eed65d821e4 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -137,7 +137,6 @@ static const char version[] = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> @@ -646,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev) if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); - printk(KERN_DEBUG "port(s) \n"); + printk(KERN_DEBUG "port(s)\n"); Word = lp->word[6]; printk(KERN_DEBUG "Word6:\n"); @@ -766,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe) /* Grab the region so we can find another board if autoIRQ fails. 
*/ if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { if (!autoprobe) - printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", + printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n", ioaddr); return -EBUSY; } @@ -1287,7 +1286,7 @@ set_multicast_list(struct net_device *dev) struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned short mode; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int mc_count = netdev_mc_count(dev); if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) @@ -1332,8 +1331,8 @@ set_multicast_list(struct net_device *dev) outw(0, ioaddr + IO_PORT); outw(6 * (mc_count + 1), ioaddr + IO_PORT); - netdev_for_each_mc_addr(dmi, dev) { - eaddrs = (unsigned short *) dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (unsigned short *) ha->addr; outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 7013dc8a6cb..c31dd068555 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -111,7 +111,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/mca-legacy.h> #include <linux/spinlock.h> #include <linux/bitops.h> @@ -1576,7 +1575,7 @@ static void eexp_hw_init586(struct net_device *dev) static void eexp_setup_filter(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned short ioaddr = dev->base_addr; int count = netdev_mc_count(dev); int i; @@ -1589,8 +1588,8 @@ static void eexp_setup_filter(struct net_device *dev) outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); i = 0; - netdev_for_each_mc_addr(dmi, dev) { - unsigned short *data = (unsigned short *) dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + unsigned short *data = (unsigned short *) ha->addr; if (i == count) break; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index b004eaba3d7..3f445efa948 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -32,6 +32,7 @@ #include <linux/udp.h> #include <linux/if.h> #include <linux/list.h> +#include <linux/slab.h> #include <linux/if_ether.h> #include <linux/notifier.h> #include <linux/reboot.h> @@ -1617,7 +1618,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, { struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; - int skb_data_size = skb->len - skb->data_len; + int skb_data_size = skb_headlen(skb); int headersize; /* Packet is TCP with TSO enabled */ @@ -1628,7 +1629,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, */ headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); - skb_data_size = skb->len - skb->data_len; + skb_data_size = skb_headlen(skb); if (skb_data_size >= headersize) { /* copy immediate data */ @@ -1650,7 +1651,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, static void write_swqe2_nonTSO(struct sk_buff *skb, struct ehea_swqe *swqe, u32 lkey) { - int skb_data_size = skb->len - skb->data_len; + int skb_data_size = skb_headlen(skb); u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; @@ -1966,7 +1967,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) static void ehea_set_multicast_list(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); - struct dev_mc_list *k_mcl_entry; + 
struct netdev_hw_addr *ha; int ret; if (dev->flags & IFF_PROMISC) { @@ -1997,8 +1998,8 @@ static void ehea_set_multicast_list(struct net_device *dev) goto out; } - netdev_for_each_mc_addr(k_mcl_entry, dev) - ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + ehea_add_multicast_entry(port, ha->addr); } out: @@ -2107,8 +2108,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, } else { /* first copy data from the skb->data buffer ... */ skb_copy_from_linear_data(skb, imm_data, - skb->len - skb->data_len); - imm_data += skb->len - skb->data_len; + skb_headlen(skb)); + imm_data += skb_headlen(skb); /* ... then copy data from the fragments */ for (i = 0; i < nfrags; i++) { diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 18d405f78c0..a1b4c7e5636 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -27,6 +27,7 @@ */ #include <linux/mm.h> +#include <linux/slab.h> #include "ehea.h" #include "ehea_phyp.h" #include "ehea_qmr.h" diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 3ee32e58c7e..ff27f728fd9 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -18,7 +18,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h index 03dce9ed612..337d1943af4 100644 --- a/drivers/net/enic/cq_enet_desc.h +++ b/drivers/net/enic/cq_enet_desc.h @@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) { - u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); - u16 q_number_rss_type_flags = - le16_to_cpu(desc->q_number_rss_type_flags); - u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + u16 completed_index_flags; + u16 q_number_rss_type_flags; + u16 bytes_written_flags; cq_desc_dec((struct cq_desc *)desc, type, color, q_number, completed_index); + completed_index_flags = le16_to_cpu(desc->completed_index_flags); + q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + *ingress_port = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index ee01f5a6d0d..5fa56f1e559 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -33,8 +33,8 @@ #include "vnic_rss.h" #define DRV_NAME "enic" -#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" -#define DRV_VERSION "1.1.0.241a" +#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" +#define DRV_VERSION "1.3.1.1" #define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" #define PFX DRV_NAME ": " diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index cf098bb636b..1232887c243 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -822,14 +822,14 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) static void enic_set_multicast_list(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); - struct dev_mc_list *list; + struct netdev_hw_addr *ha; int directed = 1; int multicast = (netdev->flags & IFF_MULTICAST) ? 
1 : 0; int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; unsigned int mc_count = netdev_mc_count(netdev); int allmulti = (netdev->flags & IFF_ALLMULTI) || - mc_count > ENIC_MULTICAST_PERFECT_FILTERS; + mc_count > ENIC_MULTICAST_PERFECT_FILTERS; unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0); u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; unsigned int i, j; @@ -852,10 +852,10 @@ static void enic_set_multicast_list(struct net_device *netdev) */ i = 0; - netdev_for_each_mc_addr(list, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == mc_count) break; - memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN); + memcpy(mc_addr[i++], ha->addr, ETH_ALEN); } for (i = 0; i < enic->mc_count; i++) { @@ -2058,8 +2058,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, netdev->watchdog_timeo = 2 * HZ; netdev->ethtool_ops = &enic_ethtool_ops; - netdev->features |= NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; if (ENIC_SETTING(enic, TXCSUM)) netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; if (ENIC_SETTING(enic, TSO)) diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c index 69b9b70c7da..d43a9d43bbf 100644 --- a/drivers/net/enic/vnic_dev.c +++ b/drivers/net/enic/vnic_dev.c @@ -23,6 +23,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/if_ether.h> +#include <linux/slab.h> #include "vnic_resource.h" #include "vnic_devcmd.h" @@ -573,22 +574,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) return err; } -int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, u16 intr) { u64 a0, a1; int wait = 1000; int r; - if (!vdev->notify) { - vdev->notify = pci_alloc_consistent(vdev->pdev, - sizeof(struct vnic_devcmd_notify), - &vdev->notify_pa); - if (!vdev->notify) - return -ENOMEM; - memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify)); - } + memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); + vdev->notify = notify_addr; + vdev->notify_pa = notify_pa; - a0 = vdev->notify_pa; + a0 = (u64)notify_pa; a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; a1 += sizeof(struct vnic_devcmd_notify); @@ -597,7 +594,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) return r; } -void vnic_dev_notify_unset(struct vnic_dev *vdev) +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + void *notify_addr; + dma_addr_t notify_pa; + + if (vdev->notify || vdev->notify_pa) { + printk(KERN_ERR "notify block %p still allocated", + vdev->notify); + return -EINVAL; + } + + notify_addr = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + ¬ify_pa); + if (!notify_addr) + return -ENOMEM; + + return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); +} + +void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) { u64 a0, a1; int wait = 1000; @@ -607,9 +624,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev) a1 += sizeof(struct vnic_devcmd_notify); vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + vdev->notify = NULL; + vdev->notify_pa = 0; vdev->notify_sz = 0; } +void vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + if (vdev->notify) { + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + } + + vnic_dev_notify_unsetcmd(vdev); +} + static int vnic_dev_notify_ready(struct vnic_dev *vdev) { u32 *words; diff --git 
a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h index fc5e3eb35a5..f5be640b0b5 100644 --- a/drivers/net/enic/vnic_dev.h +++ b/drivers/net/enic/vnic_dev.h @@ -107,7 +107,10 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, u16 intr); int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); void vnic_dev_notify_unset(struct vnic_dev *vdev); int vnic_dev_link_status(struct vnic_dev *vdev); u32 vnic_dev_port_speed(struct vnic_dev *vdev); diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c index 75583978a5e..cc580cfec41 100644 --- a/drivers/net/enic/vnic_rq.c +++ b/drivers/net/enic/vnic_rq.c @@ -22,6 +22,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> +#include <linux/slab.h> #include "vnic_dev.h" #include "vnic_rq.h" @@ -167,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq) iowrite32(0, &rq->ctrl->enable); /* Wait for HW to ACK disable request */ - for (wait = 0; wait < 100; wait++) { + for (wait = 0; wait < 1000; wait++) { if (!(ioread32(&rq->ctrl->running))) return 0; - udelay(1); + udelay(10); } printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c index d2e00e51b7b..1378afbdfe6 100644 --- a/drivers/net/enic/vnic_wq.c +++ b/drivers/net/enic/vnic_wq.c @@ -22,6 +22,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> +#include <linux/slab.h> #include "vnic_dev.h" #include "vnic_wq.h" @@ -160,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq) iowrite32(0, &wq->ctrl->enable); /* Wait for HW to ACK disable request */ - for (wait = 0; wait < 100; wait++) { + for (wait = 0; wait < 1000; wait++) { if (!(ioread32(&wq->ctrl->running))) return 0; - udelay(1); + udelay(10); } printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 39c271b6be4..8b5a203d3aa 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -73,7 +73,6 @@ static int rx_copybreak; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> @@ -1400,12 +1399,12 @@ static void set_rx_mode(struct net_device *dev) outl(0x0004, ioaddr + RxCtrl); return; } else { /* Never executed, for now. 
*/ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit_nr = - ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; + ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 3] |= (1 << bit_nr); } } diff --git a/drivers/net/eql.c b/drivers/net/eql.c index f5b96cadeb2..b34a2ddeef4 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -115,6 +115,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/slab.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <net/net_namespace.h> diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index d3abeee3f11..d4e24f08b3b 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -152,7 +152,6 @@ static char *version = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 209742304e2..6bd03c8b888 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -18,6 +18,7 @@ #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> +#include <linux/slab.h> #include <net/ethoc.h> static int buffer_size = 0x8000; /* 32 KBytes */ @@ -755,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ @@ -783,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev) hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { - netdev_for_each_mc_addr(mc, dev) { - u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index 91e59f3a9d6..11ba70f4997 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c @@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev) static void SetMulticastFilter(struct net_device *dev) { struct ewrk3_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u_long iobase = dev->base_addr; int i; char *addrs, bit, byte; @@ -1213,8 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev) } /* Update table */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? 
*/ crc = ether_crc_le(ETH_ALEN, addrs); hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ @@ -1776,8 +1776,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; case EWRK3_SET_MCA: /* Set a multicast address */ if (capable(CAP_NET_ADMIN)) { - if (ioc->len > 1024) - { + if (ioc->len > HASH_TABLE_LEN) { status = -EINVAL; break; } diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 9d5ad08a119..51b738dd654 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -74,7 +74,6 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> @@ -1792,12 +1791,12 @@ static void __set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = CR_W_AB | CR_W_AM; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit; - bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; + bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; mc_filter[bit >> 5] |= (1 << bit); } rx_mode = CR_W_AB | CR_W_AM; diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 9f98c1c4a34..2b1651aee13 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -40,6 +40,7 @@ #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <asm/cacheflush.h> @@ -61,7 +62,6 @@ * Define the fixed address of the FEC hardware. */ #if defined(CONFIG_M5272) -#define HAVE_mii_link_interrupt static unsigned char fec_mac_default[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -86,23 +86,6 @@ static unsigned char fec_mac_default[] = { #endif #endif /* CONFIG_M5272 */ -/* Forward declarations of some structures to support different PHYs */ - -typedef struct { - uint mii_data; - void (*funct)(uint mii_reg, struct net_device *dev); -} phy_cmd_t; - -typedef struct { - uint id; - char *name; - - const phy_cmd_t *config; - const phy_cmd_t *startup; - const phy_cmd_t *ack_int; - const phy_cmd_t *shutdown; -} phy_info_t; - /* The number of Tx and Rx buffers. These are allocated from the page * pool. The code may assume these are power of two, so it it best * to keep them that size. 
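
/*
 * A short sketch of the recurring multicast-list conversion in this series
 * (3c505, eepro, ehea, epic100, ewrk3, fealnx, fec, ...): struct dev_mc_list
 * and dmi->dmi_addr are replaced by struct netdev_hw_addr and ha->addr, while
 * the usual CRC hash-filter loop stays the same.  This is not any one driver's
 * code; it assumes a 64-bit multicast hash filter and a hypothetical helper
 * name.
 */
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void foo_build_mc_hash(struct net_device *dev, u32 hash[2])
{
	struct netdev_hw_addr *ha;	/* was: struct dev_mc_list *dmi */

	hash[0] = hash[1] = 0;
	netdev_for_each_mc_addr(ha, dev) {	/* walk the multicast list */
		/* was: ether_crc_le(ETH_ALEN, dmi->dmi_addr) */
		u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
		int bit = crc & 0x3f;	/* 6 hash bits -> 64 filter bits */

		hash[bit >> 5] |= 1 << (bit & 0x1f);
	}
}
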
@@ -189,29 +172,21 @@ struct fec_enet_private { uint tx_full; /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ spinlock_t hw_lock; - /* hold while accessing the mii_list_t() elements */ - spinlock_t mii_lock; - - uint phy_id; - uint phy_id_done; - uint phy_status; - uint phy_speed; - phy_info_t const *phy; - struct work_struct phy_task; - uint sequence_done; - uint mii_phy_task_queued; + struct platform_device *pdev; - uint phy_addr; + int opened; + /* Phylib and MDIO interface */ + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + int mii_timeout; + uint phy_speed; int index; - int opened; int link; - int old_link; int full_duplex; }; -static void fec_enet_mii(struct net_device *dev); static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); static void fec_enet_tx(struct net_device *dev); static void fec_enet_rx(struct net_device *dev); @@ -219,67 +194,20 @@ static int fec_enet_close(struct net_device *dev); static void fec_restart(struct net_device *dev, int duplex); static void fec_stop(struct net_device *dev); +/* FEC MII MMFR bits definition */ +#define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_PA(v) ((v & 0x1f) << 23) +#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) +#define FEC_MMFR_TA (2 << 16) +#define FEC_MMFR_DATA(v) (v & 0xffff) -/* MII processing. We keep this as simple as possible. Requests are - * placed on the list (if there is room). When the request is finished - * by the MII, an optional function may be called. - */ -typedef struct mii_list { - uint mii_regval; - void (*mii_func)(uint val, struct net_device *dev); - struct mii_list *mii_next; -} mii_list_t; - -#define NMII 20 -static mii_list_t mii_cmds[NMII]; -static mii_list_t *mii_free; -static mii_list_t *mii_head; -static mii_list_t *mii_tail; - -static int mii_queue(struct net_device *dev, int request, - void (*func)(uint, struct net_device *)); - -/* Make MII read/write commands for the FEC */ -#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) -#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ - (VAL & 0xffff)) -#define mk_mii_end 0 +#define FEC_MII_TIMEOUT 10000 /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) -/* Register definitions for the PHY */ - -#define MII_REG_CR 0 /* Control Register */ -#define MII_REG_SR 1 /* Status Register */ -#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */ -#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */ -#define MII_REG_ANAR 4 /* A-N Advertisement Register */ -#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */ -#define MII_REG_ANER 6 /* A-N Expansion Register */ -#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */ -#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. 
*/ - -/* values for phy_status */ - -#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ -#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ -#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ -#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ -#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ -#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ -#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ - -#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ -#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ -#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ -#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ -#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ -#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ -#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ -#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ - - static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -406,12 +334,6 @@ fec_enet_interrupt(int irq, void * dev_id) ret = IRQ_HANDLED; fec_enet_tx(dev); } - - if (int_events & FEC_ENET_MII) { - ret = IRQ_HANDLED; - fec_enet_mii(dev); - } - } while (int_events); return ret; @@ -607,827 +529,311 @@ rx_processing_done: spin_unlock(&fep->hw_lock); } -/* called from interrupt context */ -static void -fec_enet_mii(struct net_device *dev) -{ - struct fec_enet_private *fep; - mii_list_t *mip; - - fep = netdev_priv(dev); - spin_lock(&fep->mii_lock); - - if ((mip = mii_head) == NULL) { - printk("MII and no head!\n"); - goto unlock; - } - - if (mip->mii_func != NULL) - (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev); - - mii_head = mip->mii_next; - mip->mii_next = mii_free; - mii_free = mip; - - if ((mip = mii_head) != NULL) - writel(mip->mii_regval, fep->hwp + FEC_MII_DATA); - -unlock: - spin_unlock(&fep->mii_lock); -} - -static int -mii_queue_unlocked(struct net_device *dev, int regval, - void (*func)(uint, struct net_device *)) +/* ------------------------------------------------------------------------- */ +#ifdef CONFIG_M5272 +static void __inline__ fec_get_mac(struct net_device *dev) { - struct fec_enet_private *fep; - mii_list_t *mip; - int retval; - - /* Add PHY address to register command */ - fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(dev); + unsigned char *iap, tmpaddr[ETH_ALEN]; - regval |= fep->phy_addr << 23; - retval = 0; - - if ((mip = mii_free) != NULL) { - mii_free = mip->mii_next; - mip->mii_regval = regval; - mip->mii_func = func; - mip->mii_next = NULL; - if (mii_head) { - mii_tail->mii_next = mip; - mii_tail = mip; - } else { - mii_head = mii_tail = mip; - writel(regval, fep->hwp + FEC_MII_DATA); - } + if (FEC_FLASHMAC) { + /* + * Get MAC address from FLASH. + * If it is all 1's or 0's, use the default. 
+ */ + iap = (unsigned char *)FEC_FLASHMAC; + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) + iap = fec_mac_default; + if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && + (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) + iap = fec_mac_default; } else { - retval = 1; + *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); + *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + iap = &tmpaddr[0]; } - return retval; -} - -static int -mii_queue(struct net_device *dev, int regval, - void (*func)(uint, struct net_device *)) -{ - struct fec_enet_private *fep; - unsigned long flags; - int retval; - fep = netdev_priv(dev); - spin_lock_irqsave(&fep->mii_lock, flags); - retval = mii_queue_unlocked(dev, regval, func); - spin_unlock_irqrestore(&fep->mii_lock, flags); - return retval; -} - -static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) -{ - if(!c) - return; + memcpy(dev->dev_addr, iap, ETH_ALEN); - for (; c->mii_data != mk_mii_end; c++) - mii_queue(dev, c->mii_data, c->funct); + /* Adjust MAC if using default MAC address */ + if (iap == fec_mac_default) + dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; } +#endif -static void mii_parse_sr(uint mii_reg, struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; - - status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); - - if (mii_reg & 0x0004) - status |= PHY_STAT_LINK; - if (mii_reg & 0x0010) - status |= PHY_STAT_FAULT; - if (mii_reg & 0x0020) - status |= PHY_STAT_ANC; - *s = status; -} +/* ------------------------------------------------------------------------- */ -static void mii_parse_cr(uint mii_reg, struct net_device *dev) +/* + * Phy section + */ +static void fec_enet_adjust_link(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; - - status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP); - - if (mii_reg & 0x1000) - status |= PHY_CONF_ANE; - if (mii_reg & 0x4000) - status |= PHY_CONF_LOOP; - *s = status; -} + struct phy_device *phy_dev = fep->phy_dev; + unsigned long flags; -static void mii_parse_anar(uint mii_reg, struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; - - status = *s & ~(PHY_CONF_SPMASK); - - if (mii_reg & 0x0020) - status |= PHY_CONF_10HDX; - if (mii_reg & 0x0040) - status |= PHY_CONF_10FDX; - if (mii_reg & 0x0080) - status |= PHY_CONF_100HDX; - if (mii_reg & 0x00100) - status |= PHY_CONF_100FDX; - *s = status; -} + int status_change = 0; -/* ------------------------------------------------------------------------- */ -/* The Level one LXT970 is used by many boards */ + spin_lock_irqsave(&fep->hw_lock, flags); -#define MII_LXT970_MIRROR 16 /* Mirror register */ -#define MII_LXT970_IER 17 /* Interrupt Enable Register */ -#define MII_LXT970_ISR 18 /* Interrupt Status Register */ -#define MII_LXT970_CONFIG 19 /* Configuration Register */ -#define MII_LXT970_CSR 20 /* Chip Status Register */ + /* Prevent a state halted on mii error */ + if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { + phy_dev->state = PHY_RESUMING; + goto spin_unlock; + } -static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; + /* Duplex 
link change */ + if (phy_dev->link) { + if (fep->full_duplex != phy_dev->duplex) { + fec_restart(dev, phy_dev->duplex); + status_change = 1; + } + } - status = *s & ~(PHY_STAT_SPMASK); - if (mii_reg & 0x0800) { - if (mii_reg & 0x1000) - status |= PHY_STAT_100FDX; + /* Link on or off change */ + if (phy_dev->link != fep->link) { + fep->link = phy_dev->link; + if (phy_dev->link) + fec_restart(dev, phy_dev->duplex); else - status |= PHY_STAT_100HDX; - } else { - if (mii_reg & 0x1000) - status |= PHY_STAT_10FDX; - else - status |= PHY_STAT_10HDX; + fec_stop(dev); + status_change = 1; } - *s = status; -} - -static phy_cmd_t const phy_cmd_lxt970_config[] = { - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */ - { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt970_ack_int[] = { - /* read SR and ISR to acknowledge */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_read(MII_LXT970_ISR), NULL }, - - /* find out the current status */ - { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */ - { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, - { mk_mii_end, } - }; -static phy_info_t const phy_info_lxt970 = { - .id = 0x07810000, - .name = "LXT970", - .config = phy_cmd_lxt970_config, - .startup = phy_cmd_lxt970_startup, - .ack_int = phy_cmd_lxt970_ack_int, - .shutdown = phy_cmd_lxt970_shutdown -}; -/* ------------------------------------------------------------------------- */ -/* The Level one LXT971 is used on some of my custom boards */ - -/* register definitions for the 971 */ +spin_unlock: + spin_unlock_irqrestore(&fep->hw_lock, flags); -#define MII_LXT971_PCR 16 /* Port Control Register */ -#define MII_LXT971_SR2 17 /* Status Register 2 */ -#define MII_LXT971_IER 18 /* Interrupt Enable Register */ -#define MII_LXT971_ISR 19 /* Interrupt Status Register */ -#define MII_LXT971_LCR 20 /* LED Control Register */ -#define MII_LXT971_TCR 30 /* Transmit Control Register */ + if (status_change) + phy_print_status(phy_dev); +} /* - * I had some nice ideas of running the MDIO faster... - * The 971 should support 8MHz and I tried it, but things acted really - * weird, so 2.5 MHz ought to be enough for anyone... + * NOTE: a MII transaction is during around 25 us, so polling it... 
*/ - -static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev) +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; + struct fec_enet_private *fep = bus->priv; + int timeout = FEC_MII_TIMEOUT; - status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); + fep->mii_timeout = 0; - if (mii_reg & 0x0400) { - fep->link = 1; - status |= PHY_STAT_LINK; - } else { - fep->link = 0; - } - if (mii_reg & 0x0080) - status |= PHY_STAT_ANC; - if (mii_reg & 0x4000) { - if (mii_reg & 0x0200) - status |= PHY_STAT_100FDX; - else - status |= PHY_STAT_100HDX; - } else { - if (mii_reg & 0x0200) - status |= PHY_STAT_10FDX; - else - status |= PHY_STAT_10HDX; + /* clear MII end of transfer bit*/ + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + + /* start a read op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { + cpu_relax(); + if (timeout-- < 0) { + fep->mii_timeout = 1; + printk(KERN_ERR "FEC: MDIO read timeout\n"); + return -ETIMEDOUT; + } } - if (mii_reg & 0x0008) - status |= PHY_STAT_FAULT; - *s = status; + /* return value */ + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); } -static phy_cmd_t const phy_cmd_lxt971_config[] = { - /* limit to 10MBit because my prototype board - * doesn't work with 100. */ - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */ - { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */ - /* Somehow does the 971 tell me that the link is down - * the first read after power-up. - * read here to get a valid value in ack_int */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt971_ack_int[] = { - /* acknowledge the int before reading status ! */ - { mk_mii_read(MII_LXT971_ISR), NULL }, - /* find out the current status */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */ - { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, - { mk_mii_end, } - }; -static phy_info_t const phy_info_lxt971 = { - .id = 0x0001378e, - .name = "LXT971", - .config = phy_cmd_lxt971_config, - .startup = phy_cmd_lxt971_startup, - .ack_int = phy_cmd_lxt971_ack_int, - .shutdown = phy_cmd_lxt971_shutdown -}; - -/* ------------------------------------------------------------------------- */ -/* The Quality Semiconductor QS6612 is used on the RPX CLLF */ - -/* register definitions */ - -#define MII_QS6612_MCR 17 /* Mode Control Register */ -#define MII_QS6612_FTR 27 /* Factory Test Register */ -#define MII_QS6612_MCO 28 /* Misc. Control Register */ -#define MII_QS6612_ISR 29 /* Interrupt Source Register */ -#define MII_QS6612_IMR 30 /* Interrupt Mask Register */ -#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. 
*/ - -static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) +static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; + struct fec_enet_private *fep = bus->priv; + int timeout = FEC_MII_TIMEOUT; - status = *s & ~(PHY_STAT_SPMASK); + fep->mii_timeout = 0; - switch((mii_reg >> 2) & 7) { - case 1: status |= PHY_STAT_10HDX; break; - case 2: status |= PHY_STAT_100HDX; break; - case 5: status |= PHY_STAT_10FDX; break; - case 6: status |= PHY_STAT_100FDX; break; -} - - *s = status; -} - -static phy_cmd_t const phy_cmd_qs6612_config[] = { - /* The PHY powers up isolated on the RPX, - * so send a command to allow operation. - */ - { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, - - /* parse cr and anar to get some info */ - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */ - { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_qs6612_ack_int[] = { - /* we need to read ISR, SR and ANER to acknowledge */ - { mk_mii_read(MII_QS6612_ISR), NULL }, - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_read(MII_REG_ANER), NULL }, - - /* read pcr to get info */ - { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */ - { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, - { mk_mii_end, } - }; -static phy_info_t const phy_info_qs6612 = { - .id = 0x00181440, - .name = "QS6612", - .config = phy_cmd_qs6612_config, - .startup = phy_cmd_qs6612_startup, - .ack_int = phy_cmd_qs6612_ack_int, - .shutdown = phy_cmd_qs6612_shutdown -}; - -/* ------------------------------------------------------------------------- */ -/* AMD AM79C874 phy */ + /* clear MII end of transfer bit*/ + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); -/* register definitions for the 874 */ + /* start a read op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) { + cpu_relax(); + if (timeout-- < 0) { + fep->mii_timeout = 1; + printk(KERN_ERR "FEC: MDIO write timeout\n"); + return -ETIMEDOUT; + } + } -#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ -#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ -#define MII_AM79C874_DR 18 /* Diagnostic Register */ -#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */ -#define MII_AM79C874_MCR 21 /* ModeControl Register */ -#define MII_AM79C874_DC 23 /* Disconnect Counter */ -#define MII_AM79C874_REC 24 /* Recieve Error Counter */ + return 0; +} -static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) +static int fec_enet_mdio_reset(struct mii_bus *bus) { - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - uint status; - - status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC); - - if (mii_reg & 0x0080) - status |= PHY_STAT_ANC; - if (mii_reg & 0x0400) - status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX); - else - status |= ((mii_reg & 0x0800) ? 
PHY_STAT_10FDX : PHY_STAT_10HDX); - - *s = status; + return 0; } -static phy_cmd_t const phy_cmd_am79c874_config[] = { - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */ - { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL }, - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_am79c874_ack_int[] = { - /* find out the current status */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, - /* we only need to read ISR to acknowledge */ - { mk_mii_read(MII_AM79C874_ICSR), NULL }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */ - { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL }, - { mk_mii_end, } - }; -static phy_info_t const phy_info_am79c874 = { - .id = 0x00022561, - .name = "AM79C874", - .config = phy_cmd_am79c874_config, - .startup = phy_cmd_am79c874_startup, - .ack_int = phy_cmd_am79c874_ack_int, - .shutdown = phy_cmd_am79c874_shutdown -}; - - -/* ------------------------------------------------------------------------- */ -/* Kendin KS8721BL phy */ - -/* register definitions for the 8721 */ - -#define MII_KS8721BL_RXERCR 21 -#define MII_KS8721BL_ICSR 27 -#define MII_KS8721BL_PHYCR 31 - -static phy_cmd_t const phy_cmd_ks8721bl_config[] = { - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */ - { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL }, - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = { - /* find out the current status */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - /* we only need to read ISR to acknowledge */ - { mk_mii_read(MII_KS8721BL_ICSR), NULL }, - { mk_mii_end, } - }; -static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */ - { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL }, - { mk_mii_end, } - }; -static phy_info_t const phy_info_ks8721bl = { - .id = 0x00022161, - .name = "KS8721BL", - .config = phy_cmd_ks8721bl_config, - .startup = phy_cmd_ks8721bl_startup, - .ack_int = phy_cmd_ks8721bl_ack_int, - .shutdown = phy_cmd_ks8721bl_shutdown -}; - -/* ------------------------------------------------------------------------- */ -/* register definitions for the DP83848 */ - -#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */ - -static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev) +static int fec_enet_mii_probe(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); - - *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); - - /* Link up */ - if (mii_reg & 0x0001) { - fep->link = 1; - *s |= PHY_STAT_LINK; - } else - fep->link = 0; - /* Status of link */ - if (mii_reg & 0x0010) /* Autonegotioation complete */ - *s |= PHY_STAT_ANC; - if (mii_reg & 0x0002) { /* 10MBps? */ - if (mii_reg & 0x0004) /* Full Duplex? */ - *s |= PHY_STAT_10FDX; - else - *s |= PHY_STAT_10HDX; - } else { /* 100 Mbps? */ - if (mii_reg & 0x0004) /* Full Duplex? 
*/ - *s |= PHY_STAT_100FDX; - else - *s |= PHY_STAT_100HDX; - } - if (mii_reg & 0x0008) - *s |= PHY_STAT_FAULT; -} - -static phy_info_t phy_info_dp83848= { - 0x020005c9, - "DP83848", + struct phy_device *phy_dev = NULL; + int phy_addr; - (const phy_cmd_t []) { /* config */ - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 }, - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* startup - enable interrupts */ - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */ - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* shutdown */ - { mk_mii_end, } - }, -}; + /* find the first phy */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (fep->mii_bus->phy_map[phy_addr]) { + phy_dev = fep->mii_bus->phy_map[phy_addr]; + break; + } + } -static phy_info_t phy_info_lan8700 = { - 0x0007C0C, - "LAN8700", - (const phy_cmd_t []) { /* config */ - { mk_mii_read(MII_REG_CR), mii_parse_cr }, - { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* startup */ - { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ - { mk_mii_read(MII_REG_SR), mii_parse_sr }, - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* act_int */ - { mk_mii_end, } - }, - (const phy_cmd_t []) { /* shutdown */ - { mk_mii_end, } - }, -}; -/* ------------------------------------------------------------------------- */ + if (!phy_dev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENODEV; + } -static phy_info_t const * const phy_info[] = { - &phy_info_lxt970, - &phy_info_lxt971, - &phy_info_qs6612, - &phy_info_am79c874, - &phy_info_ks8721bl, - &phy_info_dp83848, - &phy_info_lan8700, - NULL -}; + /* attach the mac to the phy */ + phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), + &fec_enet_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); + } -/* ------------------------------------------------------------------------- */ -#ifdef HAVE_mii_link_interrupt -static irqreturn_t -mii_link_interrupt(int irq, void * dev_id); + /* mask with MAC supported features */ + phy_dev->supported &= PHY_BASIC_FEATURES; + phy_dev->advertising = phy_dev->supported; -/* - * This is specific to the MII interrupt setup of the M5272EVB. - */ -static void __inline__ fec_request_mii_intr(struct net_device *dev) -{ - if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0) - printk("FEC: Could not allocate fec(MII) IRQ(66)!\n"); -} + fep->phy_dev = phy_dev; + fep->link = 0; + fep->full_duplex = 0; -static void __inline__ fec_disable_phy_intr(struct net_device *dev) -{ - free_irq(66, dev); + return 0; } -#endif -#ifdef CONFIG_M5272 -static void __inline__ fec_get_mac(struct net_device *dev) +static int fec_enet_mii_init(struct platform_device *pdev) { + struct net_device *dev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(dev); - unsigned char *iap, tmpaddr[ETH_ALEN]; + int err = -ENXIO, i; - if (FEC_FLASHMAC) { - /* - * Get MAC address from FLASH. - * If it is all 1's or 0's, use the default. 
- */ - iap = (unsigned char *)FEC_FLASHMAC; - if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && - (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) - iap = fec_mac_default; - if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && - (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) - iap = fec_mac_default; - } else { - *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); - *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); - iap = &tmpaddr[0]; - } - - memcpy(dev->dev_addr, iap, ETH_ALEN); - - /* Adjust MAC if using default MAC address */ - if (iap == fec_mac_default) - dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; -} -#endif + fep->mii_timeout = 0; -/* ------------------------------------------------------------------------- */ - -static void mii_display_status(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - volatile uint *s = &(fep->phy_status); + /* + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) + */ + fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1; + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - if (!fep->link && !fep->old_link) { - /* Link is still down - don't print anything */ - return; + fep->mii_bus = mdiobus_alloc(); + if (fep->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; } - printk("%s: status: ", dev->name); - - if (!fep->link) { - printk("link down"); - } else { - printk("link up"); - - switch(*s & PHY_STAT_SPMASK) { - case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; - case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; - case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; - case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; - default: - printk(", Unknown speed/duplex"); - } - - if (*s & PHY_STAT_ANC) - printk(", auto-negotiation complete"); + fep->mii_bus->name = "fec_enet_mii_bus"; + fep->mii_bus->read = fec_enet_mdio_read; + fep->mii_bus->write = fec_enet_mdio_write; + fep->mii_bus->reset = fec_enet_mdio_reset; + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); + fep->mii_bus->priv = fep; + fep->mii_bus->parent = &pdev->dev; + + fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!fep->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; } - if (*s & PHY_STAT_FAULT) - printk(", remote fault"); - - printk(".\n"); -} - -static void mii_display_config(struct work_struct *work) -{ - struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); - struct net_device *dev = fep->netdev; - uint status = fep->phy_status; + for (i = 0; i < PHY_MAX_ADDR; i++) + fep->mii_bus->irq[i] = PHY_POLL; - /* - ** When we get here, phy_task is already removed from - ** the workqueue. It is thus safe to allow to reuse it. 
- */ - fep->mii_phy_task_queued = 0; - printk("%s: config: auto-negotiation ", dev->name); - - if (status & PHY_CONF_ANE) - printk("on"); - else - printk("off"); + platform_set_drvdata(dev, fep->mii_bus); - if (status & PHY_CONF_100FDX) - printk(", 100FDX"); - if (status & PHY_CONF_100HDX) - printk(", 100HDX"); - if (status & PHY_CONF_10FDX) - printk(", 10FDX"); - if (status & PHY_CONF_10HDX) - printk(", 10HDX"); - if (!(status & PHY_CONF_SPMASK)) - printk(", No speed/duplex selected?"); + if (mdiobus_register(fep->mii_bus)) + goto err_out_free_mdio_irq; - if (status & PHY_CONF_LOOP) - printk(", loopback enabled"); + if (fec_enet_mii_probe(dev) != 0) + goto err_out_unregister_bus; - printk(".\n"); + return 0; - fep->sequence_done = 1; +err_out_unregister_bus: + mdiobus_unregister(fep->mii_bus); +err_out_free_mdio_irq: + kfree(fep->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(fep->mii_bus); +err_out: + return err; } -static void mii_relink(struct work_struct *work) +static void fec_enet_mii_remove(struct fec_enet_private *fep) { - struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); - struct net_device *dev = fep->netdev; - int duplex; - - /* - ** When we get here, phy_task is already removed from - ** the workqueue. It is thus safe to allow to reuse it. - */ - fep->mii_phy_task_queued = 0; - fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; - mii_display_status(dev); - fep->old_link = fep->link; - - if (fep->link) { - duplex = 0; - if (fep->phy_status - & (PHY_STAT_100FDX | PHY_STAT_10FDX)) - duplex = 1; - fec_restart(dev, duplex); - } else - fec_stop(dev); + if (fep->phy_dev) + phy_disconnect(fep->phy_dev); + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); } -/* mii_queue_relink is called in interrupt context from mii_link_interrupt */ -static void mii_queue_relink(uint mii_reg, struct net_device *dev) +static int fec_enet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; - /* - * We cannot queue phy_task twice in the workqueue. It - * would cause an endless loop in the workqueue. - * Fortunately, if the last mii_relink entry has not yet been - * executed now, it will do the job for the current interrupt, - * which is just what we want. - */ - if (fep->mii_phy_task_queued) - return; + if (!phydev) + return -ENODEV; - fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, mii_relink); - schedule_work(&fep->phy_task); + return phy_ethtool_gset(phydev, cmd); } -/* mii_queue_config is called in interrupt context from fec_enet_mii */ -static void mii_queue_config(uint mii_reg, struct net_device *dev) +static int fec_enet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; - if (fep->mii_phy_task_queued) - return; + if (!phydev) + return -ENODEV; - fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, mii_display_config); - schedule_work(&fep->phy_task); + return phy_ethtool_sset(phydev, cmd); } -phy_cmd_t const phy_cmd_relink[] = { - { mk_mii_read(MII_REG_CR), mii_queue_relink }, - { mk_mii_end, } - }; -phy_cmd_t const phy_cmd_config[] = { - { mk_mii_read(MII_REG_CR), mii_queue_config }, - { mk_mii_end, } - }; - -/* Read remainder of PHY ID. 
*/ -static void -mii_discover_phy3(uint mii_reg, struct net_device *dev) +static void fec_enet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { - struct fec_enet_private *fep; - int i; - - fep = netdev_priv(dev); - fep->phy_id |= (mii_reg & 0xffff); - printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id); - - for(i = 0; phy_info[i]; i++) { - if(phy_info[i]->id == (fep->phy_id >> 4)) - break; - } - - if (phy_info[i]) - printk(" -- %s\n", phy_info[i]->name); - else - printk(" -- unknown PHY!\n"); + struct fec_enet_private *fep = netdev_priv(dev); - fep->phy = phy_info[i]; - fep->phy_id_done = 1; + strcpy(info->driver, fep->pdev->dev.driver->name); + strcpy(info->version, "Revision: 1.0"); + strcpy(info->bus_info, dev_name(&dev->dev)); } -/* Scan all of the MII PHY addresses looking for someone to respond - * with a valid ID. This usually happens quickly. - */ -static void -mii_discover_phy(uint mii_reg, struct net_device *dev) -{ - struct fec_enet_private *fep; - uint phytype; - - fep = netdev_priv(dev); - - if (fep->phy_addr < 32) { - if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { - - /* Got first part of ID, now get remainder */ - fep->phy_id = phytype << 16; - mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2), - mii_discover_phy3); - } else { - fep->phy_addr++; - mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1), - mii_discover_phy); - } - } else { - printk("FEC: No PHY device found.\n"); - /* Disable external MII interface */ - writel(0, fep->hwp + FEC_MII_SPEED); - fep->phy_speed = 0; -#ifdef HAVE_mii_link_interrupt - fec_disable_phy_intr(dev); -#endif - } -} +static struct ethtool_ops fec_enet_ethtool_ops = { + .get_settings = fec_enet_get_settings, + .set_settings = fec_enet_set_settings, + .get_drvinfo = fec_enet_get_drvinfo, + .get_link = ethtool_op_get_link, +}; -/* This interrupt occurs when the PHY detects a link change */ -#ifdef HAVE_mii_link_interrupt -static irqreturn_t -mii_link_interrupt(int irq, void * dev_id) +static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct net_device *dev = dev_id; struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; - mii_do_cmd(dev, fep->phy->ack_int); - mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ + if (!phydev) + return -ENODEV; - return IRQ_HANDLED; + return phy_mii_ioctl(phydev, if_mii(rq), cmd); } -#endif static void fec_enet_free_buffers(struct net_device *dev) { @@ -1509,35 +915,8 @@ fec_enet_open(struct net_device *dev) if (ret) return ret; - fep->sequence_done = 0; - fep->link = 0; - - fec_restart(dev, 1); - - if (fep->phy) { - mii_do_cmd(dev, fep->phy->ack_int); - mii_do_cmd(dev, fep->phy->config); - mii_do_cmd(dev, phy_cmd_config); /* display configuration */ - - /* Poll until the PHY tells us its configuration - * (not link state). - * Request is initiated by mii_do_cmd above, but answer - * comes by interrupt. - * This should take about 25 usec per register at 2.5 MHz, - * and we read approximately 5 registers. - */ - while(!fep->sequence_done) - schedule(); - - mii_do_cmd(dev, fep->phy->startup); - } - - /* Set the initial link state to true. A lot of hardware - * based on this device does not implement a PHY interrupt, - * so we are never notified of link change. 
- */ - fep->link = 1; - + /* schedule a link state check */ + phy_start(fep->phy_dev); netif_start_queue(dev); fep->opened = 1; return 0; @@ -1550,6 +929,7 @@ fec_enet_close(struct net_device *dev) /* Don't know what to do yet. */ fep->opened = 0; + phy_stop(fep->phy_dev); netif_stop_queue(dev); fec_stop(dev); @@ -1574,7 +954,7 @@ fec_enet_close(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; @@ -1604,16 +984,16 @@ static void set_multicast_list(struct net_device *dev) writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now */ - if (!(dmi->dmi_addr[0] & 1)) + if (!(ha->addr[0] & 1)) continue; /* calculate crc32 value of mac address */ crc = 0xffffffff; - for (i = 0; i < dmi->dmi_addrlen; i++) { - data = dmi->dmi_addr[i]; + for (i = 0; i < dev->addr_len; i++) { + data = ha->addr[i]; for (bit = 0; bit < 8; bit++, data >>= 1) { crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0); @@ -1666,6 +1046,7 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, + .ndo_do_ioctl = fec_enet_ioctl, }; /* @@ -1689,7 +1070,6 @@ static int fec_enet_init(struct net_device *dev, int index) } spin_lock_init(&fep->hw_lock); - spin_lock_init(&fep->mii_lock); fep->index = index; fep->hwp = (void __iomem *)dev->base_addr; @@ -1716,20 +1096,10 @@ static int fec_enet_init(struct net_device *dev, int index) fep->rx_bd_base = cbd_base; fep->tx_bd_base = cbd_base + RX_RING_SIZE; -#ifdef HAVE_mii_link_interrupt - fec_request_mii_intr(dev); -#endif /* The FEC Ethernet specific entries in the device structure */ dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &fec_netdev_ops; - - for (i=0; i<NMII-1; i++) - mii_cmds[i].mii_next = &mii_cmds[i+1]; - mii_free = mii_cmds; - - /* Set MII speed to 2.5 MHz */ - fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) - / 2500000) / 2) & 0x3F) << 1; + dev->ethtool_ops = &fec_enet_ethtool_ops; /* Initialize the receive buffer descriptors. */ bdp = fep->rx_bd_base; @@ -1760,13 +1130,6 @@ static int fec_enet_init(struct net_device *dev, int index) fec_restart(dev, 0); - /* Queue up command to detect the PHY and initialize the - * remainder of the interface. - */ - fep->phy_id_done = 0; - fep->phy_addr = 0; - mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); - return 0; } @@ -1835,8 +1198,7 @@ fec_restart(struct net_device *dev, int duplex) writel(0, fep->hwp + FEC_R_DES_ACTIVE); /* Enable interrupts we wish to service */ - writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, - fep->hwp + FEC_IMASK); + writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK); } static void @@ -1859,7 +1221,6 @@ fec_stop(struct net_device *dev) /* Clear outstanding MII command interrupts. 
*/ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); - writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); } @@ -1891,6 +1252,7 @@ fec_probe(struct platform_device *pdev) memset(fep, 0, sizeof(*fep)); ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); + fep->pdev = pdev; if (!ndev->base_addr) { ret = -ENOMEM; @@ -1926,13 +1288,24 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_init; + ret = fec_enet_mii_init(pdev); + if (ret) + goto failed_mii_init; + ret = register_netdev(ndev); if (ret) goto failed_register; + printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq); + return 0; failed_register: + fec_enet_mii_remove(fep); +failed_mii_init: failed_init: clk_disable(fep->clk); clk_put(fep->clk); @@ -1959,6 +1332,7 @@ fec_drv_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); fec_stop(ndev); + fec_enet_mii_remove(fep); clk_disable(fep->clk); clk_put(fep->clk); iounmap((void __iomem *)ndev->base_addr); diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 0dbd7219bbd..0376c3e472a 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/crc32.h> @@ -435,7 +436,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) DMA_FROM_DEVICE); length = status & BCOM_FEC_RX_BD_LEN_MASK; skb_put(rskb, length - 4); /* length without CRC32 */ - rskb->dev = dev; rskb->protocol = eth_type_trans(rskb, dev); netif_rx(rskb); @@ -575,12 +575,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev) out_be32(&fec->gaddr2, 0xffffffff); } else { u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 gaddr1 = 0x00000000; u32 gaddr2 = 0x00000000; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr) >> 26; if (crc >= 32) gaddr1 |= 1 << (crc-32); else diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index ee0f3c6d3f8..7658a082e39 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -14,6 +14,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/of_platform.h> +#include <linux/slab.h> #include <linux/of_mdio.h> #include <asm/io.h> #include <asm/mpc52xx.h> diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index ca05e566202..a1c0e7bb70e 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -59,6 +59,7 @@ #include <linux/init.h> #include <linux/if_vlan.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <asm/irq.h> #include <asm/io.h> @@ -2147,7 +2148,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int i; u32 offset = 0; u32 bcnt; - u32 size = skb->len-skb->data_len; + u32 size = skb_headlen(skb); u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0); u32 empty_slots; struct ring_desc* put_tx; @@ -2268,7 +2269,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, unsigned int i; u32 offset = 0; u32 bcnt; - u32 size = skb->len-skb->data_len; + u32 size = skb_headlen(skb); u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); u32 empty_slots; struct ring_desc_ex* put_tx; @@ -3103,12 +3104,14 @@ static void nv_set_multicast(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; } else { - struct dev_mc_list *walk; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(walk, dev) { + netdev_for_each_mc_addr(ha, dev) { + unsigned char *addr = ha->addr; u32 a, b; - a = le32_to_cpu(*(__le32 *) walk->dmi_addr); - b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); + + a = le32_to_cpu(*(__le32 *) addr); + b = le16_to_cpu(*(__le16 *) (&addr[4])); alwaysOn[0] &= a; alwaysOff[0] &= ~a; alwaysOn[1] &= b; @@ -5898,7 +5901,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i /* Limit the number of tx's outstanding for hw bug */ if (id->driver_data & DEV_NEED_TX_LIMIT) { np->tx_limit = 1; - if ((id->driver_data & DEV_NEED_TX_LIMIT2) && + if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && pci_dev->revision >= 0xA2) np->tx_limit = 0; } diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index cf4f674f9e2..714da967fa1 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -19,7 +19,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -34,6 +33,7 @@ #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/of_device.h> +#include <linux/gfp.h> #include <asm/immap_cpm2.h> #include <asm/mpc8260.h> @@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index cd2c6cca5f2..7eff92ef01d 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -19,7 +19,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -33,6 +32,7 @@ #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/of_device.h> +#include <linux/gfp.h> #include <asm/irq.h> #include <asm/uaccess.h> @@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index c490a466cae..7f0591e43cd 100644 --- 
a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -19,7 +19,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -224,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index c3f061957c0..032073d1e3d 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -82,6 +82,7 @@ #include <linux/tcp.h> #include <linux/udp.h> #include <linux/in.h> +#include <linux/net_tstamp.h> #include <asm/io.h> #include <asm/irq.h> @@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev) rctrl |= RCTRL_PADDING(priv->padding); } + /* Insert receive time stamps into padding alignment bytes */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { + rctrl &= ~RCTRL_PAL_MASK; + rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8); + priv->padding = 8; + } + /* keep vlan related bits if it's enabled */ if (priv->vlgrp) { rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; @@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv) /* Returns 1 if incoming frames use an FCB */ static inline int gfar_uses_fcb(struct gfar_private *priv) { - return priv->vlgrp || priv->rx_csum_enable; + return priv->vlgrp || priv->rx_csum_enable || + (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); } static void free_tx_pointers(struct gfar_private *priv) @@ -676,7 +685,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) priv->rx_queue[i] = NULL; for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( + priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( sizeof (struct gfar_priv_tx_q), GFP_KERNEL); if (!priv->tx_queue[i]) { err = -ENOMEM; @@ -689,7 +698,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) } for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( + priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( sizeof (struct gfar_priv_rx_q), GFP_KERNEL); if (!priv->rx_queue[i]) { err = -ENOMEM; @@ -742,7 +751,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | - FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; + FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | + FSL_GIANFAR_DEV_HAS_TIMER; ctype = of_get_property(np, "phy-connection-type", NULL); @@ -772,6 +782,48 @@ err_grp_init: return err; } +static int gfar_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_tx_en = 1; + break; + default: + return 
-ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->hwts_rx_en = 0; + break; + default: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + /* Ioctl MII Interface */ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -780,6 +832,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; + if (cmd == SIOCSHWTSTAMP) + return gfar_hwtstamp_ioctl(dev, rq, cmd); + if (!priv->phydev) return -ENODEV; @@ -982,7 +1037,8 @@ static int gfar_probe(struct of_device *ofdev, else priv->padding = 0; - if (dev->features & NETIF_F_IP_CSUM) + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->hard_header_len += GMAC_FCB_LEN; /* Program the isrg regs only if number of grps > 1 */ @@ -998,7 +1054,7 @@ static int gfar_probe(struct of_device *ofdev, } /* Need to reverse the bit maps as bit_map's MSB is q0 - * but, for_each_bit parses from right to left, which + * but, for_each_set_bit parses from right to left, which * basically reverses the queue numbers */ for (i = 0; i< priv->num_grps; i++) { priv->gfargrp[i].tx_bit_map = reverse_bitmap( @@ -1011,7 +1067,7 @@ static int gfar_probe(struct of_device *ofdev, * also assign queues to groups */ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { priv->gfargrp[grp_idx].num_rx_queues = 0x0; - for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, + for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, priv->num_rx_queues) { priv->gfargrp[grp_idx].num_rx_queues++; priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; @@ -1019,7 +1075,7 @@ static int gfar_probe(struct of_device *ofdev, rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); } priv->gfargrp[grp_idx].num_tx_queues = 0x0; - for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, + for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, priv->num_tx_queues) { priv->gfargrp[grp_idx].num_tx_queues++; priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; @@ -1120,10 +1176,10 @@ static int gfar_probe(struct of_device *ofdev, /* provided which set of benchmarks. 
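The gfar_hwtstamp_ioctl() handler added above implements the standard SIOCSHWTSTAMP interface, so hardware time stamping is enabled from userspace with the generic struct hwtstamp_config rather than a driver-private call. A minimal sketch of such a caller, assuming an interface named eth0 (the interface name and error handling are illustrative only, not part of this patch):

/* Illustrative userspace sketch (not part of this patch): enable hardware
 * TX time stamping and request RX time stamps for all packets via the
 * generic SIOCSHWTSTAMP ioctl.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = 0;				/* reserved, must be zero */
	cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp outgoing frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* driver may widen the filter */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}
	/* On success the driver copies back the configuration in effect. */
	printf("rx_filter in effect: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}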
*/ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); for (i = 0; i < priv->num_rx_queues; i++) - printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", dev->name, i, priv->rx_queue[i]->rx_ring_size); for(i = 0; i < priv->num_tx_queues; i++) - printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", dev->name, i, priv->tx_queue[i]->tx_ring_size); return 0; @@ -1638,13 +1694,13 @@ static void free_skb_resources(struct gfar_private *priv) /* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; - if(!tx_queue->tx_skbuff) + if(tx_queue->tx_skbuff) free_skb_tx_queue(tx_queue); } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if(!rx_queue->rx_skbuff) + if(rx_queue->rx_skbuff) free_skb_rx_queue(rx_queue); } @@ -1709,7 +1765,7 @@ void gfar_configure_coalescing(struct gfar_private *priv, if (priv->mode == MQ_MG_MODE) { baddr = ®s->txic0; - for_each_bit (i, &tx_mask, priv->num_tx_queues) { + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { if (likely(priv->tx_queue[i]->txcoalescing)) { gfar_write(baddr + i, 0); gfar_write(baddr + i, priv->tx_queue[i]->txic); @@ -1717,7 +1773,7 @@ void gfar_configure_coalescing(struct gfar_private *priv, } baddr = ®s->rxic0; - for_each_bit (i, &rx_mask, priv->num_rx_queues) { + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { if (likely(priv->rx_queue[i]->rxcoalescing)) { gfar_write(baddr + i, 0); gfar_write(baddr + i, priv->rx_queue[i]->rxic); @@ -1926,23 +1982,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct gfar __iomem *regs = NULL; struct txfcb *fcb = NULL; - struct txbd8 *txbdp, *txbdp_start, *base; + struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; u32 lstatus; - int i, rq = 0; + int i, rq = 0, do_tstamp = 0; u32 bufaddr; unsigned long flags; - unsigned int nr_frags, length; - + unsigned int nr_frags, nr_txbds, length; + union skb_shared_tx *shtx; rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; txq = netdev_get_tx_queue(dev, rq); base = tx_queue->tx_bd_base; regs = tx_queue->grp->regs; + shtx = skb_tx(skb); + + /* check if time stamp should be generated */ + if (unlikely(shtx->hardware && priv->hwts_tx_en)) + do_tstamp = 1; /* make space for additional header when fcb is needed */ if (((skb->ip_summed == CHECKSUM_PARTIAL) || - (priv->vlgrp && vlan_tx_tag_present(skb))) && + (priv->vlgrp && vlan_tx_tag_present(skb)) || + unlikely(do_tstamp)) && (skb_headroom(skb) < GMAC_FCB_LEN)) { struct sk_buff *skb_new; @@ -1959,8 +2021,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* total number of fragments in the SKB */ nr_frags = skb_shinfo(skb)->nr_frags; + /* calculate the required number of TxBDs for this skb */ + if (unlikely(do_tstamp)) + nr_txbds = nr_frags + 2; + else + nr_txbds = nr_frags + 1; + /* check if there is space to queue this packet */ - if ((nr_frags+1) > tx_queue->num_txbdfree) { + if (nr_txbds > tx_queue->num_txbdfree) { /* no space, stop the queue */ netif_tx_stop_queue(txq); dev->stats.tx_fifo_errors++; @@ -1972,9 +2040,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txq->tx_packets ++; txbdp = txbdp_start = tx_queue->cur_tx; + lstatus = txbdp->lstatus; + + /* Time stamp insertion requires one additional TxBD */ + if (unlikely(do_tstamp)) + 
txbdp_tstamp = txbdp = next_txbd(txbdp, base, + tx_queue->tx_ring_size); if (nr_frags == 0) { - lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + if (unlikely(do_tstamp)) + txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | + TXBD_INTERRUPT); + else + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); } else { /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { @@ -2020,11 +2098,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) gfar_tx_vlan(skb, fcb); } - /* setup the TxBD length and buffer pointer for the first BD */ + /* Setup tx hardware time stamping if requested */ + if (unlikely(do_tstamp)) { + shtx->in_progress = 1; + if (fcb == NULL) + fcb = gfar_add_fcb(skb); + fcb->ptp = 1; + lstatus |= BD_LFLAG(TXBD_TOE); + } + txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); + /* + * If time stamping is requested one additional TxBD must be set up. The + * first TxBD points to the FCB and must have a data length of + * GMAC_FCB_LEN. The second TxBD points to the actual frame data with + * the full frame length. + */ + if (unlikely(do_tstamp)) { + txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; + txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - GMAC_FCB_LEN); + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + } else { + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); + } /* * We can work in parallel with gfar_clean_tx_ring(), except @@ -2064,7 +2163,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); /* reduce TxBD free count */ - tx_queue->num_txbdfree -= (nr_frags + 1); + tx_queue->num_txbdfree -= (nr_txbds); dev->trans_start = jiffies; @@ -2255,16 +2354,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) struct net_device *dev = tx_queue->dev; struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_rx_q *rx_queue = NULL; - struct txbd8 *bdp; + struct txbd8 *bdp, *next = NULL; struct txbd8 *lbdp = NULL; struct txbd8 *base = tx_queue->tx_bd_base; struct sk_buff *skb; int skb_dirtytx; int tx_ring_size = tx_queue->tx_ring_size; - int frags = 0; + int frags = 0, nr_txbds = 0; int i; int howmany = 0; u32 lstatus; + size_t buflen; + union skb_shared_tx *shtx; rx_queue = priv->rx_queue[tx_queue->qindex]; bdp = tx_queue->dirty_tx; @@ -2274,7 +2375,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) unsigned long flags; frags = skb_shinfo(skb)->nr_frags; - lbdp = skip_txbd(bdp, frags, base, tx_ring_size); + + /* + * When time stamping, one additional TxBD must be freed. + * Also, we need to dma_unmap_single() the TxPAL. 
+ */ + shtx = skb_tx(skb); + if (unlikely(shtx->in_progress)) + nr_txbds = frags + 2; + else + nr_txbds = frags + 1; + + lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); lstatus = lbdp->lstatus; @@ -2283,10 +2395,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) (lstatus & BD_LENGTH_MASK)) break; - dma_unmap_single(&priv->ofdev->dev, - bdp->bufPtr, - bdp->length, - DMA_TO_DEVICE); + if (unlikely(shtx->in_progress)) { + next = next_txbd(bdp, base, tx_ring_size); + buflen = next->length + GMAC_FCB_LEN; + } else + buflen = bdp->length; + + dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, + buflen, DMA_TO_DEVICE); + + if (unlikely(shtx->in_progress)) { + struct skb_shared_hwtstamps shhwtstamps; + u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_tstamp_tx(skb, &shhwtstamps); + bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + bdp = next; + } bdp->lstatus &= BD_LFLAG(TXBD_WRAP); bdp = next_txbd(bdp, base, tx_ring_size); @@ -2318,7 +2444,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) howmany++; spin_lock_irqsave(&tx_queue->txlock, flags); - tx_queue->num_txbdfree += frags + 1; + tx_queue->num_txbdfree += nr_txbds; spin_unlock_irqrestore(&tx_queue->txlock, flags); } @@ -2393,6 +2519,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev) * as many bytes as needed to align the data properly */ skb_reserve(skb, alignamount); + GFAR_CB(skb)->alignamount = alignamount; return skb; } @@ -2473,6 +2600,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, skb_pull(skb, amount_pull); } + /* Get receive timestamp from the skb */ + if (priv->hwts_rx_en) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + u64 *ns = (u64 *) skb->data; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(*ns); + } + + if (priv->padding) + skb_pull(skb, priv->padding); + if (priv->rx_csum_enable) gfar_rx_checksum(skb, fcb); @@ -2509,8 +2647,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) bdp = rx_queue->cur_rx; base = rx_queue->rx_bd_base; - amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + - priv->padding; + amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { struct sk_buff *newskb; @@ -2533,13 +2670,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) newskb = skb; else if (skb) { /* - * We need to reset ->data to what it + * We need to un-reserve() the skb to what it * was before gfar_new_skb() re-aligned * it to an RXBUF_ALIGNMENT boundary * before we put the skb back on the * recycle list. 
*/ - skb->data = skb->head + NET_SKB_PAD; + skb_reserve(skb, -GFAR_CB(skb)->alignamount); __skb_queue_head(&priv->rx_recycle, skb); } } else { @@ -2610,7 +2747,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) budget_per_queue = left_over_budget/num_queues; left_over_budget = 0; - for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { if (test_bit(i, &serviced_queues)) continue; rx_queue = priv->rx_queue[i]; @@ -2797,7 +2934,7 @@ static void adjust_link(struct net_device *dev) * whenever dev->flags is changed */ static void gfar_set_multi(struct net_device *dev) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; @@ -2870,13 +3007,12 @@ static void gfar_set_multi(struct net_device *dev) return; /* Parse the list, and set the appropriate bits */ - netdev_for_each_mc_addr(mc_ptr, dev) { + netdev_for_each_mc_addr(ha, dev) { if (idx < em_num) { - gfar_set_mac_for_addr(dev, idx, - mc_ptr->dmi_addr); + gfar_set_mac_for_addr(dev, idx, ha->addr); idx++; } else - gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); + gfar_set_hash_for_addr(dev, ha->addr); } } diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 3d72dc43dca..ac4a92e08c0 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -262,6 +262,7 @@ extern const char gfar_driver_version[]; #define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) +#define RCTRL_TS_ENABLE 0x01000000 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_VLEX 0x00002000 #define RCTRL_FILREN 0x00001000 @@ -539,7 +540,7 @@ struct txbd8 struct txfcb { u8 flags; - u8 reserved; + u8 ptp; /* Flag to enable tx timestamping */ u8 l4os; /* Level 4 Header Offset */ u8 l3os; /* Level 3 Header Offset */ u16 phcs; /* Pseudo-header Checksum */ @@ -566,6 +567,12 @@ struct rxfcb { u16 vlctl; /* VLAN control word */ }; +struct gianfar_skb_cb { + int alignamount; +}; + +#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) + struct rmon_mib { u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ @@ -879,6 +886,7 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 +#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 #if (MAXGROUPS == 2) #define DEFAULT_MAPPING 0xAA @@ -1094,6 +1102,10 @@ struct gfar_private { /* Network Statistics */ struct gfar_extra_stats extra_stats; + + /* HW time stamping enabled flag */ + int hwts_rx_en; + int hwts_tx_en; }; extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 1010367695e..9bda023c023 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -19,7 +19,6 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c index b98c6c51229..64f4094ac7f 100644 --- a/drivers/net/gianfar_sysfs.c +++ b/drivers/net/gianfar_sysfs.c @@ -24,7 +24,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/etherdevice.h> diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 
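With the driver filling in skb_shared_hwtstamps on receive and calling skb_tstamp_tx() on transmit completion, applications retrieve the stamps through the generic SO_TIMESTAMPING socket API; nothing gianfar-specific is exposed. A hedged receive-side sketch, assuming a UDP socket bound to an arbitrary example port (the fallback defines cover libcs that predate these constants):

/* Illustrative userspace sketch (not part of this patch): read raw hardware
 * receive time stamps delivered by the stack as SCM_TIMESTAMPING control
 * messages.  Port 12345 is an arbitrary example.
 */
#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING		37	/* value from asm-generic/socket.h */
#endif
#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING	SO_TIMESTAMPING
#endif

int main(void)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(12345) };
	char data[2048];
	union { char buf[512]; struct cmsghdr align; } ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl.buf,
			      .msg_controllen = sizeof(ctrl.buf) };
	struct cmsghdr *cmsg;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0)
		return 1;

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			/* ts[0] software, ts[1] legacy, ts[2] raw hardware */
			struct timespec *ts = (struct timespec *)CMSG_DATA(cmsg);

			printf("hw stamp: %ld.%09ld\n",
			       (long)ts[2].tv_sec, ts[2].tv_nsec);
		}
	}
	return 0;
}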
2b9c1cbc9ec..fd491e40948 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -34,6 +34,7 @@ #include <linux/mii.h> #include <linux/of_device.h> #include <linux/of_platform.h> +#include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/byteorder.h> @@ -894,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit) else skb->ip_summed = CHECKSUM_NONE; - skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; netif_receive_skb(skb); @@ -989,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr) static void greth_set_hash_filter(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; struct greth_private *greth = netdev_priv(dev); struct greth_regs *regs = (struct greth_regs *) greth->regs; u32 mc_filter[2]; @@ -997,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev) mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = greth_hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = greth_hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 373546dd083..83f43bb835d 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -153,7 +153,6 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include <linux/time.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> @@ -1858,12 +1857,12 @@ static void set_rx_mode(struct net_device *dev) /* Too many to match, or accept all multicasts. */ writew(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i = 0; - netdev_for_each_mc_addr(mclist, dev) { - writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8); - writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]), + netdev_for_each_mc_addr(ha, dev) { + writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8); + writel(0x20000 | (*(u16 *)&ha->addr[4]), ioaddr + 0x104 + i*8); i++; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 689b9bd377a..4b52c767ad0 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -24,6 +24,7 @@ #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/timer.h> +#include <linux/slab.h> #include <net/ax25.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 0cab992b3d1..3e25f10cabd 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev) return -EINVAL; } if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) { - printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n", + printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n", dev->base_addr); return -EACCES; } diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index bdadf3e23c9..14f01d156db 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -61,6 +61,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/net.h> +#include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 
9ee76b42668..52b14256e2c 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/sockios.h> #include <linux/workqueue.h> diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 91c5790c958..b8bdf9d51cd 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -48,7 +48,6 @@ #include <linux/net.h> #include <linux/in.h> #include <linux/if.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/bitops.h> diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 7db0a1c3216..66e88bd59ca 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -26,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/in.h> #include <linux/inet.h> +#include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 35c936175bb..f3a96b84391 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -158,7 +158,6 @@ #include <linux/in.h> #include <linux/fcntl.h> #include <linux/ptrace.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index b766a69bf0c..0f3f6c2e694 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -102,7 +102,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/eisa.h> #include <linux/pci.h> @@ -2100,15 +2099,15 @@ static void hp100_set_multicast_list(struct net_device *dev) } else { int i, idx; u_char *addrs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; memset(&lp->hash_bytes, 0x00, 8); #ifdef HP100_DEBUG printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, netdev_mc_count(dev)); #endif - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 0x01) { /* multicast address? 
*/ #ifdef HP100_DEBUG printk("hp100: %s: multicast = %pM, ", diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c index 3e3528ade25..b6060f7538d 100644 --- a/drivers/net/hplance.c +++ b/drivers/net/hplance.c @@ -10,7 +10,6 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index d496b6f4a47..24724b4ad70 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index fb0ac6d7c04..2484e9e6c1e 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -39,6 +39,7 @@ #include <linux/bitops.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/slab.h> #include <asm/processor.h> #include <asm/io.h> @@ -388,18 +389,19 @@ static void emac_hash_mc(struct emac_instance *dev) const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); u32 gaht_temp[regs]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev)); memset(gaht_temp, 0, sizeof (gaht_temp)); - netdev_for_each_mc_addr(dmi, dev->ndev) { + netdev_for_each_mc_addr(ha, dev->ndev) { int slot, reg, mask; - DBG2(dev, "mc %pM" NL, dmi->dmi_addr); + DBG2(dev, "mc %pM" NL, ha->addr); - slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); + slot = EMAC_XAHT_CRC_TO_SLOT(dev, + ether_crc(ETH_ALEN, ha->addr)); reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); @@ -1176,7 +1178,7 @@ static int emac_open(struct net_device *ndev) netif_carrier_on(dev->ndev); /* Required for Pause packet support in EMAC */ - dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); + dev_mc_add_global(ndev, default_mcast_addr); emac_configure(dev); mal_poll_add(dev->mal, &dev->commac); @@ -1699,7 +1701,6 @@ static int emac_poll_rx(void *param, int budget) skb_put(skb, len); push_packet: - skb->dev = dev->ndev; skb->protocol = eth_type_trans(skb, dev->ndev); emac_rx_csum(dev, skb, ctrl); diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h index 18d56c6c423..b1cbe6fdfc7 100644 --- a/drivers/net/ibm_newemac/core.h +++ b/drivers/net/ibm_newemac/core.h @@ -34,6 +34,7 @@ #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/of_platform.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/dcr.h> diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c index 2a2fc17b287..5b3d94419fe 100644 --- a/drivers/net/ibm_newemac/mal.c +++ b/drivers/net/ibm_newemac/mal.c @@ -26,6 +26,7 @@ */ #include <linux/delay.h> +#include <linux/slab.h> #include "core.h" #include <asm/dcr-regs.h> diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index 8d76cb89dbd..5b90d34c845 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -21,6 +21,7 @@ * option) any later version. 
* */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/ethtool.h> #include <asm/io.h> diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c index 17b15412494..1f038f808ab 100644 --- a/drivers/net/ibm_newemac/zmii.c +++ b/drivers/net/ibm_newemac/zmii.c @@ -21,6 +21,7 @@ * option) any later version. * */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/ethtool.h> #include <asm/io.h> diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index b5d0f4e973f..294ccfb427c 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c @@ -79,7 +79,6 @@ History: #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/time.h> @@ -385,7 +384,7 @@ static void InitBoard(struct net_device *dev) int camcnt; camentry_t cams[16]; u32 cammask; - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u16 rcrval; /* reset the SONIC */ @@ -420,8 +419,8 @@ static void InitBoard(struct net_device *dev) /* start putting the multicast addresses into the CAM list. Stop if it is full. */ - netdev_for_each_mc_addr(mcptr, dev) { - putcam(cams, &camcnt, mcptr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + putcam(cams, &camcnt, ha->addr); if (camcnt == 16) break; } @@ -479,7 +478,7 @@ static void InitBoard(struct net_device *dev) /* if still multicast addresses left or ALLMULTI is set, set the multicast enable bit */ - if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL)) + if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt) rcrval |= RCREG_AMC; /* promiscous mode ? */ diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index f2b93796695..0d2c3ac2005 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -49,6 +49,7 @@ #include <linux/proc_fs.h> #include <linux/in.h> #include <linux/ip.h> +#include <linux/slab.h> #include <net/net_namespace.h> #include <asm/hvcall.h> #include <asm/atomic.h> @@ -1072,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); } } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* clear the filter table & disable filtering */ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastEnableRecv | @@ -1083,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); } /* add the addresses to the filter table */ - netdev_for_each_mc_addr(mclist, netdev) { + netdev_for_each_mc_addr(ha, netdev) { // add the multicast address to the filter table unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); + memcpy(((char *)&mcast_addr)+2, ha->addr, 6); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1577,7 +1578,7 @@ static struct attribute * veth_pool_attrs[] = { NULL, }; -static struct sysfs_ops veth_pool_ops = { +static const struct sysfs_ops veth_pool_ops = { .show = veth_pool_show, .store = veth_pool_store, }; diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 9d7fa2fb85e..3ef495537dc 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -30,7 +30,6 @@ */ #include <linux/types.h> -#include <linux/slab.h> #include <linux/if_ether.h> #include "e1000_mac.h" @@ -94,6 +93,7 @@ static s32 
igb_get_invariants_82575(struct e1000_hw *hw) case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: case E1000_DEV_ID_82576_SERDES_QUAD: mac->type = e1000_82576; break; @@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) case E1000_DEV_ID_82580_COPPER_DUAL: mac->type = e1000_82580; break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; default: return -E1000_ERR_MAC_INIT; break; @@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) mac->rar_entry_count = E1000_RAR_ENTRIES_82576; if (mac->type == e1000_82580) mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; /* reset */ - if (mac->type == e1000_82580) + if (mac->type >= e1000_82580) mac->ops.reset_hw = igb_reset_hw_82580; else mac->ops.reset_hw = igb_reset_hw_82575; @@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.reset = igb_phy_hw_reset_sgmii_82575; phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; - } else if (hw->mac.type == e1000_82580) { + } else if (hw->mac.type >= e1000_82580) { phy->ops.reset = igb_phy_hw_reset; phy->ops.read_reg = igb_read_phy_reg_82580; phy->ops.write_reg = igb_write_phy_reg_82580; @@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; break; case I82580_I_PHY_ID: + case I350_I_PHY_ID: phy->type = e1000_phy_82580; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; phy->ops.get_cable_length = igb_get_cable_length_82580; @@ -1445,7 +1454,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) **/ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { - u32 mdicnfg = 0; s32 ret_val; @@ -1453,15 +1461,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) if (ret_val) goto out; - /* - * We config the phy address in MDICNFG register now. Same bits - * as before. The values in MDIC can be written but will be - * ignored. This allows us to call the old function after - * configuring the PHY address in the new register - */ - mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); - wr32(E1000_MDICNFG, mdicnfg); - ret_val = igb_read_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); @@ -1480,7 +1479,6 @@ out: **/ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { - u32 mdicnfg = 0; s32 ret_val; @@ -1488,15 +1486,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) if (ret_val) goto out; - /* - * We config the phy address in MDICNFG register now. Same bits - * as before. The values in MDIC can be written but will be - * ignored. 
This allows us to call the old function after - * configuring the PHY address in the new register - */ - mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); - wr32(E1000_MDICNFG, mdicnfg); - ret_val = igb_write_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index fbe1c99c193..cbd1e1259e4 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_OFF1_ON2)) -#define E1000_RAR_ENTRIES_82575 16 -#define E1000_RAR_ENTRIES_82576 24 -#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 #define E1000_SW_SYNCH_MB 0x00000100 #define E1000_STAT_DEV_RST_SET 0x00100000 @@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 #define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 #define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 @@ -108,6 +110,7 @@ union e1000_adv_rx_desc { #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 #define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index fe6cf1b696c..31d24e0e76d 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -629,6 +629,7 @@ #define M88E1111_I_PHY_ID 0x01410CC0 #define IGP03E1000_E_PHY_ID 0x02A80390 #define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 #define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 448005276b2..cb8db78b1a0 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/netdevice.h> #include "e1000_regs.h" #include "e1000_defines.h" @@ -41,6 +42,7 @@ struct e1000_hw; #define E1000_DEV_ID_82576_FIBER 0x10E6 #define E1000_DEV_ID_82576_SERDES 0x10E7 #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 #define E1000_DEV_ID_82576_NS 0x150A #define E1000_DEV_ID_82576_NS_SERDES 0x1518 #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D @@ -52,6 +54,10 @@ struct e1000_hw; #define E1000_DEV_ID_82580_SERDES 0x1510 #define E1000_DEV_ID_82580_SGMII 0x1511 #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 #define E1000_REVISION_2 2 #define E1000_REVISION_4 4 @@ -71,6 +77,7 @@ enum e1000_mac_type { e1000_82575, e1000_82576, e1000_82580, + e1000_i350, e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ }; @@ -501,14 +508,11 @@ struct e1000_hw { u8 revision_id; }; -#ifdef DEBUG -extern char *igb_get_hw_dev_name(struct e1000_hw *hw); +extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); #define hw_dbg(format, arg...) 
\ - printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg) -#else -#define hw_dbg(format, arg...) -#endif -#endif + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + /* These functions must be implemented by drivers */ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 2a8a886b37e..be8d010e402 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -1367,7 +1367,8 @@ out: * igb_enable_mng_pass_thru - Enable processing of ARP's * @hw: pointer to the HW structure * - * Verifies the hardware needs to allow ARPs to be processed by the host. + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. **/ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) { @@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) manc = rd32(E1000_MANC); - if (!(manc & E1000_MANC_RCV_TCO_EN) || - !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + if (!(manc & E1000_MANC_RCV_TCO_EN)) goto out; if (hw->mac.arc_subsystem_valid) { diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index a1775705b24..7d288ccca1c 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -107,6 +107,7 @@ struct vf_data_storage { #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 /* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_64 64 /* Used for packet split */ #define IGB_RXBUFFER_128 128 /* Used for packet split */ #define IGB_RXBUFFER_1024 1024 #define IGB_RXBUFFER_2048 2048 @@ -267,7 +268,6 @@ struct igb_adapter { /* TX */ struct igb_ring *tx_ring[16]; - unsigned long tx_queue_len; u32 tx_timeout_count; /* RX */ @@ -324,6 +324,7 @@ struct igb_adapter { #define IGB_82576_TSYNC_SHIFT 19 #define IGB_82580_TSYNC_SHIFT 24 +#define IGB_TS_HDR_LEN 16 enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index a4cead12fd9..1b8fd7f4064 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/ethtool.h> #include <linux/sched.h> +#include <linux/slab.h> #include "igb.h" @@ -901,6 +902,49 @@ struct igb_reg_test { #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + /* 82580 reg test */ static struct igb_reg_test reg_test_82580[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1076,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) u32 i, toggle; switch (adapter->hw.mac.type) { + case e1000_i350: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; case e1000_82580: test = reg_test_82580; toggle = 0x7FEFF3FF; @@ -1237,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) case e1000_82580: ics_mask = 0x77DCFED5; break; + case e1000_i350: + ics_mask = 0x77DCFED5; + break; default: ics_mask = 0x7FFFFFFF; break; @@ -1813,6 +1864,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, retval = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* quad port adapters only support WoL on port A */ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { wol->supported = 0; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 583a21c1def..c19b1e0caec 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -32,6 +32,7 @@ #include <linux/pagemap.h> #include <linux/netdevice.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/net_tstamp.h> @@ -61,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = { }; static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, @@ -72,6 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82576_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, @@ -221,43 +227,17 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc) return stamp; } -#ifdef DEBUG /** - * igb_get_hw_dev_name - return device name string + * igb_get_hw_dev - return device * used by hardware layer to print debugging information **/ -char *igb_get_hw_dev_name(struct e1000_hw *hw) +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) { struct igb_adapter *adapter = hw->back; - return adapter->netdev->name; + return adapter->netdev; } /** - * igb_get_time_str - format current NIC and system time as string - */ -static char *igb_get_time_str(struct igb_adapter *adapter, - char buffer[160]) -{ - cycle_t hw = adapter->cycles.read(&adapter->cycles); - struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock)); - struct timespec sys; - struct timespec delta; - getnstimeofday(&sys); - - delta = timespec_sub(nic, sys); - - sprintf(buffer, - "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns", - hw, - (long)nic.tv_sec, nic.tv_nsec, - (long)sys.tv_sec, sys.tv_nsec, - (long)delta.tv_sec, delta.tv_nsec); - - return buffer; -} -#endif - -/** * igb_init_module - Driver Registration Routine * * igb_init_module is the first routine called when the driver is @@ -326,6 +306,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) } case e1000_82575: case e1000_82580: + case e1000_i350: default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@ -469,6 +450,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) q_vector->eims_value = 1 << msix_vector; break; case e1000_82580: + case e1000_i350: /* 82580 uses the same table-based approach as 82576 but has fewer entries as a result we carry over for queues greater than 4. */ if (rx_queue > IGB_N0_QUEUE) { @@ -549,6 +531,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) case e1000_82576: case e1000_82580: + case e1000_i350: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug. */ wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | @@ -688,7 +671,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* start with one vector for every rx queue */ numvecs = adapter->num_rx_queues; - /* if tx handler is seperate add 1 for every tx queue */ + /* if tx handler is separate add 1 for every tx queue */ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) numvecs += adapter->num_tx_queues; @@ -1104,9 +1087,6 @@ static void igb_configure(struct igb_adapter *adapter) struct igb_ring *ring = adapter->rx_ring[i]; igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); } - - - adapter->tx_queue_len = netdev->tx_queue_len; } /** @@ -1212,7 +1192,6 @@ void igb_down(struct igb_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); /* record the stats before reset*/ @@ -1255,6 +1234,7 @@ void igb_reset(struct igb_adapter *adapter) * To take effect CTRL.RST is required. 
*/ switch (mac->type) { + case e1000_i350: case e1000_82580: pba = rd32(E1000_RXPBS); pba = igb_rxpbs_adjust_82580(pba); @@ -1614,6 +1594,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->eeprom_wol = 0; @@ -1827,6 +1808,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; switch (hw->mac.type) { + case e1000_i350: case e1000_82580: memset(&adapter->cycles, 0, sizeof(adapter->cycles)); adapter->cycles.read = igb_read_clock; @@ -2340,6 +2322,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) if (adapter->vfs_allocated_count) { /* 82575 and 82576 supports 2 RSS queues for VMDq */ switch (hw->mac.type) { + case e1000_i350: case e1000_82580: num_rx_queues = 1; shift = 0; @@ -2591,6 +2574,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; } + if (hw->mac.type == e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; @@ -2877,7 +2862,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list; int i; @@ -2894,8 +2879,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev) /* The shared function expects a packed array of only addresses. */ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); igb_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); @@ -3105,17 +3090,13 @@ static void igb_watchdog_task(struct work_struct *work) ((ctrl & E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); - /* tweak tx_queue_len according to speed/duplex and - * adjust the timeout factor */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 14; break; case SPEED_100: - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } @@ -3925,6 +3906,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) * i.e. 
RXBUFFER_2048 --> size-4096 slab */ + if (adapter->hw.mac.type == e1000_82580) + max_frame += IGB_TS_HDR_LEN; + if (max_frame <= IGB_RXBUFFER_1024) rx_buffer_len = IGB_RXBUFFER_1024; else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) @@ -3932,6 +3916,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) else rx_buffer_len = IGB_RXBUFFER_128; + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN)) + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN; + + if ((adapter->hw.mac.type == e1000_82580) && + (rx_buffer_len == IGB_RXBUFFER_128)) + rx_buffer_len += IGB_RXBUFFER_64; + if (netif_running(netdev)) igb_down(adapter); @@ -3962,7 +3954,7 @@ void igb_update_stats(struct igb_adapter *adapter) struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - u32 rnbc, reg; + u32 reg, mpc; u16 phy_tmp; int i; u64 bytes, packets; @@ -4020,7 +4012,9 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.symerrs += rd32(E1000_SYMERRS); adapter->stats.sec += rd32(E1000_SEC); - adapter->stats.mpc += rd32(E1000_MPC); + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; adapter->stats.scc += rd32(E1000_SCC); adapter->stats.ecol += rd32(E1000_ECOL); adapter->stats.mcc += rd32(E1000_MCC); @@ -4035,9 +4029,7 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.gptc += rd32(E1000_GPTC); adapter->stats.gotc += rd32(E1000_GOTCL); rd32(E1000_GOTCH); /* clear GOTCL */ - rnbc = rd32(E1000_RNBC); - adapter->stats.rnbc += rnbc; - net_stats->rx_fifo_errors += rnbc; + adapter->stats.rnbc += rd32(E1000_RNBC); adapter->stats.ruc += rd32(E1000_RUC); adapter->stats.rfc += rd32(E1000_RFC); adapter->stats.rjc += rd32(E1000_RJC); @@ -5109,7 +5101,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector, { struct igb_adapter *adapter = q_vector->adapter; - if (vlan_tag) + if (vlan_tag && adapter->vlgrp) vlan_gro_receive(&q_vector->napi, adapter->vlgrp, vlan_tag, skb); else @@ -5148,7 +5140,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring, dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); } -static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, +static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, struct sk_buff *skb) { struct igb_adapter *adapter = q_vector->adapter; @@ -5166,13 +5158,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, * If nothing went wrong, then it should have a skb_shared_tx that we * can turn into a skb_shared_hwtstamps. 
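/*
 * igb_systim_to_hwtstamp() itself is not part of this hunk. As a rough,
 * hypothetical sketch of what such a conversion usually involves -- assuming
 * the timecounter set up in igb_init_hw_timer() and ignoring any per-MAC
 * scaling the real helper may apply -- the raw SYSTIM cycle count is run
 * through the timecounter and stored in the skb's shared timestamp area:
 */
static void igb_systim_to_hwtstamp_sketch(struct igb_adapter *adapter,
					  struct skb_shared_hwtstamps *hwtstamps,
					  u64 systim)
{
	u64 ns;

	/* adapter->clock was initialised against igb_read_clock() */
	ns = timecounter_cyc2time(&adapter->clock, systim);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}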
*/ - if (likely(!(staterr & E1000_RXDADV_STAT_TS))) - return; - if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; + if (staterr & E1000_RXDADV_STAT_TSIP) { + u32 *stamp = (u32 *)skb->data; + regval = le32_to_cpu(*(stamp + 2)); + regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; + skb_pull(skb, IGB_TS_HDR_LEN); + } else { + if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + } igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); } @@ -5280,7 +5277,8 @@ send_up: goto next_desc; } - igb_rx_hwtstamp(q_vector, staterr, skb); + if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS)) + igb_rx_hwtstamp(q_vector, staterr, skb); total_bytes += skb->len; total_packets++; @@ -5560,6 +5558,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, return 0; } + /* + * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one rx filter was configured. + */ + if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + } + /* enable/disable TX */ regval = rd32(E1000_TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; @@ -6136,19 +6144,25 @@ static void igb_vmm_control(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 reg; - /* replication is not supported for 82575 */ - if (hw->mac.type == e1000_82575) + switch (hw->mac.type) { + case e1000_82575: + default: + /* replication is not supported for 82575 */ return; - - /* enable replication vlan tag stripping */ - reg = rd32(E1000_RPLOLR); - reg |= E1000_RPLOLR_STRVLAN; - wr32(E1000_RPLOLR, reg); - - /* notify HW that the MAC is adding vlan tags */ - reg = rd32(E1000_DTXCTL); - reg |= E1000_DTXCTL_VLAN_ADDED; - wr32(E1000_DTXCTL, reg); + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } if (adapter->vfs_allocated_count) { igb_vmdq_set_loopback_pf(hw, true); diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index a1774b29d22..debeee2dc71 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -198,7 +198,6 @@ struct igbvf_adapter { struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; - unsigned long tx_queue_len; unsigned int restart_queue; u32 txd_cmd; diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index a77afd8a14b..cc2309027e6 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -35,6 +35,7 @@ #include <linux/netdevice.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/mii.h> @@ -1304,8 +1305,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; - - adapter->tx_queue_len = adapter->netdev->tx_queue_len; } /** @@ -1399,7 +1398,7 @@ static void igbvf_set_multi(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = 
&adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list = NULL; int i; @@ -1414,8 +1413,8 @@ static void igbvf_set_multi(struct net_device *netdev) /* prepare a packed array of only addresses. */ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); kfree(mta_list); @@ -1524,7 +1523,6 @@ void igbvf_down(struct igbvf_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); /* record the stats before reset*/ @@ -1857,21 +1855,15 @@ static void igbvf_watchdog_task(struct work_struct *work) &adapter->link_duplex); igbvf_print_link_info(adapter); - /* - * tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor - */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = 0; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = 0; - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 70871b9b045..091ea3377ed 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -44,6 +44,7 @@ #include <linux/tcp.h> #include <linux/udp.h> #include <linux/dma-mapping.h> +#include <linux/gfp.h> #ifdef CONFIG_SERIAL_8250 #include <linux/serial_core.h> @@ -1664,7 +1665,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void ioc3_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct ioc3_private *ip = netdev_priv(dev); struct ioc3 *ioc3 = ip->regs; u64 ehar = 0; @@ -1688,8 +1689,8 @@ static void ioc3_set_multicast_list(struct net_device *dev) ip->ehar_h = 0xffffffff; ip->ehar_l = 0xffffffff; } else { - netdev_for_each_mc_addr(dmi, dev) { - char *addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addr = ha->addr; if (!(*addr & 1)) continue; diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 150415e83f6..72e3d2da9e9 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -22,6 +22,7 @@ */ #include <linux/crc32.h> #include <linux/ethtool.h> +#include <linux/gfp.h> #include <linux/mii.h> #include <linux/mutex.h> @@ -569,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev) static void ipg_nic_set_multicast_list(struct net_device *dev) { void __iomem *ioaddr = ipg_ioaddr(dev); - struct dev_mc_list *mc_list_ptr; + struct netdev_hw_addr *ha; unsigned int hashindex; u32 hashtable[2]; u8 receivemode; @@ -608,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev) hashtable[1] = 0x00000000; /* Cycle through all multicast addresses to filter. */ - netdev_for_each_mc_addr(mc_list_ptr, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Calculate CRC result for each multicast address. */ - hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, + hashindex = crc32_le(0xffffffff, ha->addr, ETH_ALEN); /* Use only the least significant 6 bits. 
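/*
 * The rest of this hunk is cut off here. The usual completion of this
 * pattern -- a sketch, not necessarily byte-for-byte what ipg.c does --
 * masks the CRC down to 6 bits and sets the matching bit in the 64-bit
 * multicast hash filter, which is split across two 32-bit registers:
 */
hashindex &= 0x3F;				/* keep bits 0..5, i.e. 0..63 */
if (hashindex < 32)
	hashtable[0] |= (1U << hashindex);		/* bits  0..31 -> low  word */
else
	hashtable[1] |= (1U << (hashindex - 32));	/* bits 32..63 -> high word */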
*/ @@ -1547,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work) container_of(work, struct ipg_nic_private, task.work); struct net_device *dev = sp->dev; - IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL)); - /* * Acknowledge HostError interrupt by resetting * IPG DMA and HOST. @@ -1825,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev) netif_stop_queue(dev); - IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount); - IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount); - IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount); IPG_DUMPTFDLIST(dev); do { diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index dfc2541bb55..6ce027355fc 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h @@ -29,7 +29,7 @@ /* GMII based PHY IDs */ #define NS 0x2000 #define MARVELL 0x0141 -#define ICPLUS_PHY 0x243 +#define ICPLUS_PHY 0x243 /* NIC Physical Layer Device MII register fields. */ #define MII_PHY_SELECTOR_IEEE8023 0x0001 @@ -96,31 +96,31 @@ enum ipg_regs { }; /* Ethernet MIB statistic register offsets. */ -#define IPG_OCTETRCVOK 0xA8 +#define IPG_OCTETRCVOK 0xA8 #define IPG_MCSTOCTETRCVDOK 0xAC #define IPG_BCSTOCTETRCVOK 0xB0 #define IPG_FRAMESRCVDOK 0xB4 #define IPG_MCSTFRAMESRCVDOK 0xB8 #define IPG_BCSTFRAMESRCVDOK 0xBE #define IPG_MACCONTROLFRAMESRCVD 0xC6 -#define IPG_FRAMETOOLONGERRRORS 0xC8 -#define IPG_INRANGELENGTHERRORS 0xCA -#define IPG_FRAMECHECKSEQERRORS 0xCC -#define IPG_FRAMESLOSTRXERRORS 0xCE -#define IPG_OCTETXMTOK 0xD0 +#define IPG_FRAMETOOLONGERRRORS 0xC8 +#define IPG_INRANGELENGTHERRORS 0xCA +#define IPG_FRAMECHECKSEQERRORS 0xCC +#define IPG_FRAMESLOSTRXERRORS 0xCE +#define IPG_OCTETXMTOK 0xD0 #define IPG_MCSTOCTETXMTOK 0xD4 #define IPG_BCSTOCTETXMTOK 0xD8 #define IPG_FRAMESXMTDOK 0xDC #define IPG_MCSTFRAMESXMTDOK 0xE0 -#define IPG_FRAMESWDEFERREDXMT 0xE4 +#define IPG_FRAMESWDEFERREDXMT 0xE4 #define IPG_LATECOLLISIONS 0xE8 #define IPG_MULTICOLFRAMES 0xEC #define IPG_SINGLECOLFRAMES 0xF0 #define IPG_BCSTFRAMESXMTDOK 0xF6 -#define IPG_CARRIERSENSEERRORS 0xF8 +#define IPG_CARRIERSENSEERRORS 0xF8 #define IPG_MACCONTROLFRAMESXMTDOK 0xFA -#define IPG_FRAMESABORTXSCOLLS 0xFC -#define IPG_FRAMESWEXDEFERRAL 0xFE +#define IPG_FRAMESABORTXSCOLLS 0xFC +#define IPG_FRAMESWEXDEFERRAL 0xFE /* RMON statistic register offsets. 
*/ #define IPG_ETHERSTATSCOLLISIONS 0x100 @@ -134,8 +134,8 @@ enum ipg_regs { #define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 #define IPG_ETHERSTATSCRCALIGNERRORS 0x124 #define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 -#define IPG_ETHERSTATSFRAGMENTS 0x12C -#define IPG_ETHERSTATSJABBERS 0x130 +#define IPG_ETHERSTATSFRAGMENTS 0x12C +#define IPG_ETHERSTATSJABBERS 0x130 #define IPG_ETHERSTATSOCTETS 0x134 #define IPG_ETHERSTATSPKTS 0x138 #define IPG_ETHERSTATSPKTS64OCTESTS 0x13C @@ -154,10 +154,10 @@ enum ipg_regs { #define IPG_ETHERSTATSDROPEVENTS 0xCE /* Serial EEPROM offsets */ -#define IPG_EEPROM_CONFIGPARAM 0x00 +#define IPG_EEPROM_CONFIGPARAM 0x00 #define IPG_EEPROM_ASICCTRL 0x01 #define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 -#define IPG_EEPROM_SUBSYSTEMID 0x03 +#define IPG_EEPROM_SUBSYSTEMID 0x03 #define IPG_EEPROM_STATIONADDRESS0 0x10 #define IPG_EEPROM_STATIONADDRESS1 0x11 #define IPG_EEPROM_STATIONADDRESS2 0x12 @@ -168,16 +168,16 @@ enum ipg_regs { /* IOBaseAddress */ #define IPG_PIB_RSVD_MASK 0xFFFFFE01 -#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 -#define IPG_PIB_IOBASEADDRIND 0x00000001 +#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 +#define IPG_PIB_IOBASEADDRIND 0x00000001 /* MemBaseAddress */ #define IPG_PMB_RSVD_MASK 0xFFFFFE07 -#define IPG_PMB_MEMBASEADDRIND 0x00000001 +#define IPG_PMB_MEMBASEADDRIND 0x00000001 #define IPG_PMB_MEMMAPTYPE 0x00000006 #define IPG_PMB_MEMMAPTYPE0 0x00000002 #define IPG_PMB_MEMMAPTYPE1 0x00000004 -#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 +#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 /* ConfigStatus */ #define IPG_CS_RSVD_MASK 0xFFB0 @@ -196,20 +196,20 @@ enum ipg_regs { /* TFDList, TFC */ #define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF -#define IPG_TFC_FRAMEID 0x000000000000FFFF +#define IPG_TFC_FRAMEID 0x000000000000FFFF #define IPG_TFC_WORDALIGN 0x0000000000030000 #define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 -#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 #define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 #define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 #define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 #define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 #define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 #define IPG_TFC_TXINDICATE 0x0000000000400000 -#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 #define IPG_TFC_FRAGCOUNT 0x000000000F000000 -#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 -#define IPG_TFC_TFDDONE 0x0000000080000000 +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 +#define IPG_TFC_TFDDONE 0x0000000080000000 #define IPG_TFC_VID 0x00000FFF00000000 #define IPG_TFC_CFI 0x0000100000000000 #define IPG_TFC_USERPRIORITY 0x0000E00000000000 @@ -217,35 +217,35 @@ enum ipg_regs { /* TFDList, FragInfo */ #define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF #define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF -#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL +#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL /* RFD data structure masks. 
*/ /* RFDList, RFS */ #define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF #define IPG_RFS_RXFRAMELEN 0x000000000000FFFF -#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 #define IPG_RFS_RXRUNTFRAME 0x0000000000020000 #define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 #define IPG_RFS_RXFCSERROR 0x0000000000080000 #define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 -#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 #define IPG_RFS_VLANDETECTED 0x0000000000400000 #define IPG_RFS_TCPDETECTED 0x0000000000800000 #define IPG_RFS_TCPERROR 0x0000000001000000 #define IPG_RFS_UDPDETECTED 0x0000000002000000 #define IPG_RFS_UDPERROR 0x0000000004000000 #define IPG_RFS_IPDETECTED 0x0000000008000000 -#define IPG_RFS_IPERROR 0x0000000010000000 +#define IPG_RFS_IPERROR 0x0000000010000000 #define IPG_RFS_FRAMESTART 0x0000000020000000 #define IPG_RFS_FRAMEEND 0x0000000040000000 -#define IPG_RFS_RFDDONE 0x0000000080000000 +#define IPG_RFS_RFDDONE 0x0000000080000000 #define IPG_RFS_TCI 0x0000FFFF00000000 /* RFDList, FragInfo */ #define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF #define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF -#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL +#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL /* I/O Register masks. */ @@ -254,37 +254,37 @@ enum ipg_regs { /* Statistics Mask */ #define IPG_SM_ALL 0x0FFFFFFF -#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 -#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 -#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 +#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 +#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 +#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 #define IPG_SM_RXJUMBOFRAMES 0x00000008 #define IPG_SM_TCPCHECKSUMERRORS 0x00000010 -#define IPG_SM_IPCHECKSUMERRORS 0x00000020 +#define IPG_SM_IPCHECKSUMERRORS 0x00000020 #define IPG_SM_UDPCHECKSUMERRORS 0x00000040 #define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 #define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 #define IPG_SM_INRANGELENGTHERRORS 0x00000200 #define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 #define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 -#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 -#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 -#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 +#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 +#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 +#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 #define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 -#define IPG_SM_LATECOLLISIONS 0x00010000 -#define IPG_SM_MULTICOLFRAMES 0x00020000 -#define IPG_SM_SINGLECOLFRAMES 0x00040000 +#define IPG_SM_LATECOLLISIONS 0x00010000 +#define IPG_SM_MULTICOLFRAMES 0x00020000 +#define IPG_SM_SINGLECOLFRAMES 0x00040000 #define IPG_SM_TXJUMBOFRAMES 0x00080000 #define IPG_SM_CARRIERSENSEERRORS 0x00100000 #define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 #define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 -#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 +#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 /* Countdown */ #define IPG_CD_RSVD_MASK 0x0700FFFF #define IPG_CD_COUNT 0x0000FFFF -#define IPG_CD_COUNTDOWNSPEED 0x01000000 +#define IPG_CD_COUNTDOWNSPEED 0x01000000 #define IPG_CD_COUNTDOWNMODE 0x02000000 -#define IPG_CD_COUNTINTENABLED 0x04000000 +#define IPG_CD_COUNTINTENABLED 0x04000000 /* TxDMABurstThresh */ #define IPG_TB_RSVD_MASK 0xFF @@ -653,15 +653,28 @@ enum ipg_regs { * Miscellaneous macros. */ -/* Marco for printing debug statements. 
*/ +/* Macros for printing debug statements. */ #ifdef IPG_DEBUG -# define IPG_DEBUG_MSG(args...) -# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args) +# define IPG_DEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) +# define IPG_DDEBUG_MSG(fmt, args...) \ + printk(KERN_DEBUG "IPG: " fmt, ##args) # define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) # define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) #else -# define IPG_DEBUG_MSG(args...) -# define IPG_DDEBUG_MSG(args...) +# define IPG_DEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) +# define IPG_DDEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) # define IPG_DUMPRFDLIST(args) # define IPG_DUMPTFDLIST(args) #endif diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index af10e97345c..25bb2a015e1 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -397,5 +397,11 @@ config MCS_FIR To compile it as a module, choose M here: the module will be called mcs7780. +config SH_IRDA + tristate "SuperH IrDA driver" + depends on IRDA && ARCH_SHMOBILE + help + Say Y here if your want to enable SuperH IrDA devices. + endmenu diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index e030d47e279..dfc64537f62 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o obj-$(CONFIG_MCS_FIR) += mcs7780.o obj-$(CONFIG_AU1000_FIR) += au1k_ir.o +obj-$(CONFIG_SH_IRDA) += sh_irda.o # SIR drivers obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o obj-$(CONFIG_BFIN_SIR) += bfin_sir.o diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 12c7b006f76..a3cb109006a 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -22,6 +22,7 @@ ********************************************************************/ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/kernel.h> #include <linux/types.h> @@ -29,7 +30,6 @@ #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/serial_reg.h> @@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__); + IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__); } if (ali_ircc_dma_receive_complete(self)) { - IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__); + IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__); self->ier = IER_EOM; } else { - IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__); + IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__); self->ier = IER_EOM | IER_TIMER; } @@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self) if(OldMessageCount > ((self->LineStatus+1) & 0x07)) { self->rcvFramesOverflow = TRUE; - IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__); + IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__); } /* Disable Timer */ switch_bank(iobase, BANK1); @@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb 
*self) // benjamin 2000/11/10 06:32PM if (self->io.speed > 115200) { - IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ ); + IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ ); self->ier = IER_EOM; // SetCOMInterrupts(self, TRUE); @@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); - IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud); + IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud); /* This function *must* be called with irq off and spin-lock. * - Jean II */ @@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb, diff = self->now.tv_usec - self->stamp.tv_usec; /* self->stamp is set from ali_ircc_dma_receive_complete() */ - IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff); + IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff); if (diff < 0) diff += 1000000; @@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self) tmp = inb(iobase+FIR_LCR_B); tmp &= ~0x20; // Disable SIP outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); - IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B)); + IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); outb(0, iobase+FIR_LSR); @@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self) //switch_bank(iobase, BANK0); tmp = inb(iobase+FIR_LCR_B); outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM - IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B)); + IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B)); /* Set Rx Threshold */ switch_bank(iobase, BANK1); @@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) /* Check for errors */ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) { - IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ ); + IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ ); /* Skip frame */ self->netdev->stats.rx_errors++; @@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) if (status & LSR_FIFO_UR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ ); + IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ ); } if (status & LSR_FRAME_ERROR) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ ); + IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ ); } if (status & LSR_CRC_ERROR) { self->netdev->stats.rx_crc_errors++; - IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ ); + IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ ); } if(self->rcvFramesOverflow) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ ); + IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ ); } if(len == 0) { self->netdev->stats.rx_frame_errors++; - IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ ); + IRDA_DEBUG(0,"%s(), ********** 
Receive Frame Size = 0 *********\n", __func__ ); } } else @@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) val = inb(iobase+FIR_BSR); if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) { - IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ ); + IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ ); /* Put this entry back in fifo */ st_fifo->head--; diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h index dac71b1f4f9..b54a6f08db4 100644 --- a/drivers/net/irda/bfin_sir.h +++ b/drivers/net/irda/bfin_sir.h @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 2c9b3af1661..4441fa3389c 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb) /* Usually precursor to a hot-unplug on OHCI. */ default: self->netdev->stats.rx_errors++; - IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags); + IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags); break; } /* If we received an error, we don't want to resubmit the diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 20f9bc62668..ee1dde52e8f 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/tty.h> #include <linux/init.h> #include <asm/uaccess.h> diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 2413295ebd9..e30cdbb1474 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -43,6 +43,7 @@ ********************************************************************/ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/kernel.h> #include <linux/types.h> @@ -50,7 +51,6 @@ #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/dma-mapping.h> diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 84db145d2b5..1a54f6bb68c 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -18,6 +18,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> +#include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index c412e802617..1dcdce0631a 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -331,7 +331,7 @@ static int sa1100_irda_resume(struct platform_device *pdev) * If we missed a speed change, initialise at the new speed * directly. It is debatable whether this is actually * required, but in the interests of continuing from where - * we left off it is desireable. The converse argument is + * we left off it is desirable. The converse argument is * that we should re-negotiate at 9600 baud again. 
*/ if (si->newspeed) { diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c new file mode 100644 index 00000000000..9a828b06a57 --- /dev/null +++ b/drivers/net/irda/sh_irda.c @@ -0,0 +1,865 @@ +/* + * SuperH IrDA Driver + * + * Copyright (C) 2010 Renesas Solutions Corp. + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * Based on sh_sir.c + * Copyright (C) 2009 Renesas Solutions Corp. + * Copyright 2006-2009 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * CAUTION + * + * This driver is very simple. + * So, it doesn't have below support now + * - MIR/FIR support + * - DMA transfer support + * - FIFO mode support + */ +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <net/irda/wrapper.h> +#include <net/irda/irda_device.h> + +#define DRIVER_NAME "sh_irda" + +#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377) +#define __IRDARAM_LEN 0x13FF +#else +#define __IRDARAM_LEN 0x1039 +#endif + +#define IRTMR 0x1F00 /* Transfer mode */ +#define IRCFR 0x1F02 /* Configuration */ +#define IRCTR 0x1F04 /* IR control */ +#define IRTFLR 0x1F20 /* Transmit frame length */ +#define IRTCTR 0x1F22 /* Transmit control */ +#define IRRFLR 0x1F40 /* Receive frame length */ +#define IRRCTR 0x1F42 /* Receive control */ +#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */ +#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */ +#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */ +#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */ +#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */ +#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */ +#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */ +#define CRCCTR 0x1F80 /* CRC engine control */ +#define CRCIR 0x1F86 /* CRC engine input data */ +#define CRCCR 0x1F8A /* CRC engine calculation */ +#define CRCOR 0x1F8E /* CRC engine output data */ +#define FIFOCP 0x1FC0 /* FIFO current pointer */ +#define FIFOFP 0x1FC2 /* FIFO follow pointer */ +#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */ +#define FIFORSOR 0x1FC6 /* FIFO receive status OR */ +#define FIFOSEL 0x1FC8 /* FIFO select */ +#define FIFORS 0x1FCA /* FIFO receive status */ +#define FIFORFL 0x1FCC /* FIFO receive frame length */ +#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */ +#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */ +#define BIFCTL 0x1FD2 /* BUS interface control */ +#define IRDARAM 0x0000 /* IrDA buffer RAM */ +#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */ + +/* IRTMR */ +#define TMD_MASK (0x3 << 14) /* Transfer Mode */ +#define TMD_SIR (0x0 << 14) +#define TMD_MIR (0x3 << 14) +#define TMD_FIR (0x2 << 14) + +#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */ +#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */ +#define SIM (1 << 0) /* SIR Interrupt Mask */ +#define xIM_MASK (FIFORIM | MIM | SIM) + +/* IRCFR */ +#define RTO_SHIFT 8 /* shift for Receive Timeout */ +#define RTO (0x3 << RTO_SHIFT) + +/* IRTCTR */ +#define ARMOD (1 << 15) /* Auto-Receive Mode */ +#define TE (1 << 0) /* Transmit Enable */ + +/* IRRFLR */ +#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */ + +/* IRRCTR */ +#define RE (1 << 0) /* Receive Enable */ + +/* + * SIRISR, SIRIMR, SIRICR, + * MFIRISR, MFIRIMR, MFIRICR + */ +#define FRE (1 << 15) /* Frame Receive End */ +#define TROV (1 
<< 11) /* Transfer Area Overflow */ +#define xIR_9 (1 << 9) +#define TOT xIR_9 /* for SIR Timeout */ +#define ABTD xIR_9 /* for MIR/FIR Abort Detection */ +#define xIR_8 (1 << 8) +#define FER xIR_8 /* for SIR Framing Error */ +#define CRCER xIR_8 /* for MIR/FIR CRC error */ +#define FTE (1 << 7) /* Frame Transmit End */ +#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE) + +/* SIRBCR */ +#define BRC_MASK (0x3F) /* mask for Baud Rate Count */ + +/* CRCCTR */ +#define CRC_RST (1 << 15) /* CRC Engine Reset */ +#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */ + +/* CRCIR */ +#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */ + +/************************************************************************ + + + enum / structure + + +************************************************************************/ +enum sh_irda_mode { + SH_IRDA_NONE = 0, + SH_IRDA_SIR, + SH_IRDA_MIR, + SH_IRDA_FIR, +}; + +struct sh_irda_self; +struct sh_irda_xir_func { + int (*xir_fre) (struct sh_irda_self *self); + int (*xir_trov) (struct sh_irda_self *self); + int (*xir_9) (struct sh_irda_self *self); + int (*xir_8) (struct sh_irda_self *self); + int (*xir_fte) (struct sh_irda_self *self); +}; + +struct sh_irda_self { + void __iomem *membase; + unsigned int irq; + struct clk *clk; + + struct net_device *ndev; + + struct irlap_cb *irlap; + struct qos_info qos; + + iobuff_t tx_buff; + iobuff_t rx_buff; + + enum sh_irda_mode mode; + spinlock_t lock; + + struct sh_irda_xir_func *xir_func; +}; + +/************************************************************************ + + + common function + + +************************************************************************/ +static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data) +{ + unsigned long flags; + + spin_lock_irqsave(&self->lock, flags); + iowrite16(data, self->membase + offset); + spin_unlock_irqrestore(&self->lock, flags); +} + +static u16 sh_irda_read(struct sh_irda_self *self, u32 offset) +{ + unsigned long flags; + u16 ret; + + spin_lock_irqsave(&self->lock, flags); + ret = ioread16(self->membase + offset); + spin_unlock_irqrestore(&self->lock, flags); + + return ret; +} + +static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset, + u16 mask, u16 data) +{ + unsigned long flags; + u16 old, new; + + spin_lock_irqsave(&self->lock, flags); + old = ioread16(self->membase + offset); + new = (old & ~mask) | data; + if (old != new) + iowrite16(data, self->membase + offset); + spin_unlock_irqrestore(&self->lock, flags); +} + +/************************************************************************ + + + mode function + + +************************************************************************/ +/*===================================== + * + * common + * + *=====================================*/ +static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable) +{ + struct device *dev = &self->ndev->dev; + + sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0); + dev_dbg(dev, "recv %s\n", enable ? 
"enable" : "disable"); +} + +static int sh_irda_set_timeout(struct sh_irda_self *self, int interval) +{ + struct device *dev = &self->ndev->dev; + + if (SH_IRDA_SIR != self->mode) + interval = 0; + + if (interval < 0 || interval > 2) { + dev_err(dev, "unsupported timeout interval\n"); + return -EINVAL; + } + + sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT); + return 0; +} + +static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate) +{ + struct device *dev = &self->ndev->dev; + u16 val; + + if (baudrate < 0) + return 0; + + if (SH_IRDA_SIR != self->mode) { + dev_err(dev, "it is not SIR mode\n"); + return -EINVAL; + } + + /* + * Baud rate (bits/s) = + * (48 MHz / 26) / (baud rate counter value + 1) x 16 + */ + val = (48000000 / 26 / 16 / baudrate) - 1; + dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val); + + sh_irda_update_bits(self, SIRBCR, BRC_MASK, val); + + return 0; +} + +static int xir_get_rcv_length(struct sh_irda_self *self) +{ + return RFL_MASK & sh_irda_read(self, IRRFLR); +} + +/*===================================== + * + * NONE MODE + * + *=====================================*/ +static int xir_fre(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + dev_err(dev, "none mode: frame recv\n"); + return 0; +} + +static int xir_trov(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + dev_err(dev, "none mode: buffer ram over\n"); + return 0; +} + +static int xir_9(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + dev_err(dev, "none mode: time over\n"); + return 0; +} + +static int xir_8(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + dev_err(dev, "none mode: framing error\n"); + return 0; +} + +static int xir_fte(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + dev_err(dev, "none mode: frame transmit end\n"); + return 0; +} + +static struct sh_irda_xir_func xir_func = { + .xir_fre = xir_fre, + .xir_trov = xir_trov, + .xir_9 = xir_9, + .xir_8 = xir_8, + .xir_fte = xir_fte, +}; + +/*===================================== + * + * MIR/FIR MODE + * + * MIR/FIR are not supported now + *=====================================*/ +static struct sh_irda_xir_func mfir_func = { + .xir_fre = xir_fre, + .xir_trov = xir_trov, + .xir_9 = xir_9, + .xir_8 = xir_8, + .xir_fte = xir_fte, +}; + +/*===================================== + * + * SIR MODE + * + *=====================================*/ +static int sir_fre(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + u16 data16; + u8 *data = (u8 *)&data16; + int len = xir_get_rcv_length(self); + int i, j; + + if (len > IRDARAM_LEN) + len = IRDARAM_LEN; + + dev_dbg(dev, "frame recv length = %d\n", len); + + for (i = 0; i < len; i++) { + j = i % 2; + if (!j) + data16 = sh_irda_read(self, IRDARAM + i); + + async_unwrap_char(self->ndev, &self->ndev->stats, + &self->rx_buff, data[j]); + } + self->ndev->last_rx = jiffies; + + sh_irda_rcv_ctrl(self, 1); + + return 0; +} + +static int sir_trov(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + + dev_err(dev, "buffer ram over\n"); + sh_irda_rcv_ctrl(self, 1); + return 0; +} + +static int sir_tot(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + + dev_err(dev, "time over\n"); + sh_irda_set_baudrate(self, 9600); + sh_irda_rcv_ctrl(self, 1); + return 0; +} + +static int sir_fer(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + + dev_err(dev, "framing error\n"); + 
sh_irda_rcv_ctrl(self, 1); + return 0; +} + +static int sir_fte(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + + dev_dbg(dev, "frame transmit end\n"); + netif_wake_queue(self->ndev); + + return 0; +} + +static struct sh_irda_xir_func sir_func = { + .xir_fre = sir_fre, + .xir_trov = sir_trov, + .xir_9 = sir_tot, + .xir_8 = sir_fer, + .xir_fte = sir_fte, +}; + +static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode) +{ + struct device *dev = &self->ndev->dev; + struct sh_irda_xir_func *func; + const char *name; + u16 data; + + switch (mode) { + case SH_IRDA_SIR: + name = "SIR"; + data = TMD_SIR; + func = &sir_func; + break; + case SH_IRDA_MIR: + name = "MIR"; + data = TMD_MIR; + func = &mfir_func; + break; + case SH_IRDA_FIR: + name = "FIR"; + data = TMD_FIR; + func = &mfir_func; + break; + default: + name = "NONE"; + data = 0; + func = &xir_func; + break; + } + + self->mode = mode; + self->xir_func = func; + sh_irda_update_bits(self, IRTMR, TMD_MASK, data); + + dev_dbg(dev, "switch to %s mode", name); +} + +/************************************************************************ + + + irq function + + +************************************************************************/ +static void sh_irda_set_irq_mask(struct sh_irda_self *self) +{ + u16 tmr_hole; + u16 xir_reg; + + /* set all mask */ + sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK); + sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK); + sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK); + + /* clear irq */ + sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK); + sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK); + + switch (self->mode) { + case SH_IRDA_SIR: + tmr_hole = SIM; + xir_reg = SIRIMR; + break; + case SH_IRDA_MIR: + case SH_IRDA_FIR: + tmr_hole = MIM; + xir_reg = MFIRIMR; + break; + default: + tmr_hole = 0; + xir_reg = 0; + break; + } + + /* open mask */ + if (xir_reg) { + sh_irda_update_bits(self, IRTMR, tmr_hole, 0); + sh_irda_update_bits(self, xir_reg, xIR_MASK, 0); + } +} + +static irqreturn_t sh_irda_irq(int irq, void *dev_id) +{ + struct sh_irda_self *self = dev_id; + struct sh_irda_xir_func *func = self->xir_func; + u16 isr = sh_irda_read(self, SIRISR); + + /* clear irq */ + sh_irda_write(self, SIRICR, isr); + + if (isr & FRE) + func->xir_fre(self); + if (isr & TROV) + func->xir_trov(self); + if (isr & xIR_9) + func->xir_9(self); + if (isr & xIR_8) + func->xir_8(self); + if (isr & FTE) + func->xir_fte(self); + + return IRQ_HANDLED; +} + +/************************************************************************ + + + CRC function + + +************************************************************************/ +static void sh_irda_crc_reset(struct sh_irda_self *self) +{ + sh_irda_write(self, CRCCTR, CRC_RST); +} + +static void sh_irda_crc_add(struct sh_irda_self *self, u16 data) +{ + sh_irda_write(self, CRCIR, data & CRC_IN_MASK); +} + +static u16 sh_irda_crc_cnt(struct sh_irda_self *self) +{ + return CRC_CT_MASK & sh_irda_read(self, CRCCTR); +} + +static u16 sh_irda_crc_out(struct sh_irda_self *self) +{ + return sh_irda_read(self, CRCOR); +} + +static int sh_irda_crc_init(struct sh_irda_self *self) +{ + struct device *dev = &self->ndev->dev; + int ret = -EIO; + u16 val; + + sh_irda_crc_reset(self); + + sh_irda_crc_add(self, 0xCC); + sh_irda_crc_add(self, 0xF5); + sh_irda_crc_add(self, 0xF1); + sh_irda_crc_add(self, 0xA7); + + val = sh_irda_crc_cnt(self); + if (4 != val) { + dev_err(dev, "CRC count error %x\n", val); + goto crc_init_out; + } + + val 
= sh_irda_crc_out(self); + if (0x51DF != val) { + dev_err(dev, "CRC result error%x\n", val); + goto crc_init_out; + } + + ret = 0; + +crc_init_out: + + sh_irda_crc_reset(self); + return ret; +} + +/************************************************************************ + + + iobuf function + + +************************************************************************/ +static void sh_irda_remove_iobuf(struct sh_irda_self *self) +{ + kfree(self->rx_buff.head); + + self->tx_buff.head = NULL; + self->tx_buff.data = NULL; + self->rx_buff.head = NULL; + self->rx_buff.data = NULL; +} + +static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize) +{ + if (self->rx_buff.head || + self->tx_buff.head) { + dev_err(&self->ndev->dev, "iobuff has already existed."); + return -EINVAL; + } + + /* rx_buff */ + self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL); + if (!self->rx_buff.head) + return -ENOMEM; + + self->rx_buff.truesize = rxsize; + self->rx_buff.in_frame = FALSE; + self->rx_buff.state = OUTSIDE_FRAME; + self->rx_buff.data = self->rx_buff.head; + + /* tx_buff */ + self->tx_buff.head = self->membase + IRDARAM; + self->tx_buff.truesize = IRDARAM_LEN; + + return 0; +} + +/************************************************************************ + + + net_device_ops function + + +************************************************************************/ +static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct sh_irda_self *self = netdev_priv(ndev); + struct device *dev = &self->ndev->dev; + int speed = irda_get_next_speed(skb); + int ret; + + dev_dbg(dev, "hard xmit\n"); + + netif_stop_queue(ndev); + sh_irda_rcv_ctrl(self, 0); + + ret = sh_irda_set_baudrate(self, speed); + if (ret < 0) + return ret; + + self->tx_buff.len = 0; + if (skb->len) { + unsigned long flags; + + spin_lock_irqsave(&self->lock, flags); + self->tx_buff.len = async_wrap_skb(skb, + self->tx_buff.head, + self->tx_buff.truesize); + spin_unlock_irqrestore(&self->lock, flags); + + if (self->tx_buff.len > self->tx_buff.truesize) + self->tx_buff.len = self->tx_buff.truesize; + + sh_irda_write(self, IRTFLR, self->tx_buff.len); + sh_irda_write(self, IRTCTR, ARMOD | TE); + } + + dev_kfree_skb(skb); + + return 0; +} + +static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) +{ + /* + * FIXME + * + * This function is needed for irda framework. 
+ * But nothing to do now + */ + return 0; +} + +static struct net_device_stats *sh_irda_stats(struct net_device *ndev) +{ + struct sh_irda_self *self = netdev_priv(ndev); + + return &self->ndev->stats; +} + +static int sh_irda_open(struct net_device *ndev) +{ + struct sh_irda_self *self = netdev_priv(ndev); + int err; + + clk_enable(self->clk); + err = sh_irda_crc_init(self); + if (err) + goto open_err; + + sh_irda_set_mode(self, SH_IRDA_SIR); + sh_irda_set_timeout(self, 2); + sh_irda_set_baudrate(self, 9600); + + self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); + if (!self->irlap) { + err = -ENODEV; + goto open_err; + } + + netif_start_queue(ndev); + sh_irda_rcv_ctrl(self, 1); + sh_irda_set_irq_mask(self); + + dev_info(&ndev->dev, "opened\n"); + + return 0; + +open_err: + clk_disable(self->clk); + + return err; +} + +static int sh_irda_stop(struct net_device *ndev) +{ + struct sh_irda_self *self = netdev_priv(ndev); + + /* Stop IrLAP */ + if (self->irlap) { + irlap_close(self->irlap); + self->irlap = NULL; + } + + netif_stop_queue(ndev); + + dev_info(&ndev->dev, "stoped\n"); + + return 0; +} + +static const struct net_device_ops sh_irda_ndo = { + .ndo_open = sh_irda_open, + .ndo_stop = sh_irda_stop, + .ndo_start_xmit = sh_irda_hard_xmit, + .ndo_do_ioctl = sh_irda_ioctl, + .ndo_get_stats = sh_irda_stats, +}; + +/************************************************************************ + + + platform_driver function + + +************************************************************************/ +static int __devinit sh_irda_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct sh_irda_self *self; + struct resource *res; + char clk_name[8]; + unsigned int irq; + int err = -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!res || irq < 0) { + dev_err(&pdev->dev, "Not enough platform resources.\n"); + goto exit; + } + + ndev = alloc_irdadev(sizeof(*self)); + if (!ndev) + goto exit; + + self = netdev_priv(ndev); + self->membase = ioremap_nocache(res->start, resource_size(res)); + if (!self->membase) { + err = -ENXIO; + dev_err(&pdev->dev, "Unable to ioremap.\n"); + goto err_mem_1; + } + + err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); + if (err) + goto err_mem_2; + + snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id); + self->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(self->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + goto err_mem_3; + } + + irda_init_max_qos_capabilies(&self->qos); + + ndev->netdev_ops = &sh_irda_ndo; + ndev->irq = irq; + + self->ndev = ndev; + self->qos.baud_rate.bits &= IR_9600; /* FIXME */ + self->qos.min_turn_time.bits = 1; /* 10 ms or more */ + spin_lock_init(&self->lock); + + irda_qos_bits_to_value(&self->qos); + + err = register_netdev(ndev); + if (err) + goto err_mem_4; + + platform_set_drvdata(pdev, ndev); + + if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) { + dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); + goto err_mem_4; + } + + dev_info(&pdev->dev, "SuperH IrDA probed\n"); + + goto exit; + +err_mem_4: + clk_put(self->clk); +err_mem_3: + sh_irda_remove_iobuf(self); +err_mem_2: + iounmap(self->membase); +err_mem_1: + free_netdev(ndev); +exit: + return err; +} + +static int __devexit sh_irda_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_irda_self *self = netdev_priv(ndev); + + if (!self) + return 0; + + 
unregister_netdev(ndev); + clk_put(self->clk); + sh_irda_remove_iobuf(self); + iounmap(self->membase); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver sh_irda_driver = { + .probe = sh_irda_probe, + .remove = __devexit_p(sh_irda_remove), + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init sh_irda_init(void) +{ + return platform_driver_register(&sh_irda_driver); +} + +static void __exit sh_irda_exit(void) +{ + platform_driver_unregister(&sh_irda_driver); +} + +module_init(sh_irda_init); +module_exit(sh_irda_exit); + +MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); +MODULE_DESCRIPTION("SuperH IrDA driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index d7c983dc91a..5c5f99d5034 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> #include <asm/clock.h> @@ -645,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev) sh_sir_set_baudrate(self, 9600); self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); - if (!self->irlap) + if (!self->irlap) { + err = -ENODEV; goto open_err; + } /* * Now enable the interrupt then start the queue @@ -706,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) struct sh_sir_self *self; struct resource *res; char clk_name[8]; - void __iomem *base; unsigned int irq; int err = -ENOMEM; @@ -721,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) if (!ndev) goto exit; - base = ioremap_nocache(res->start, resource_size(res)); - if (!base) { + self = netdev_priv(ndev); + self->membase = ioremap_nocache(res->start, resource_size(res)); + if (!self->membase) { err = -ENXIO; dev_err(&pdev->dev, "Unable to ioremap.\n"); goto err_mem_1; } - self = netdev_priv(ndev); err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); if (err) goto err_mem_2; @@ -745,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev) ndev->netdev_ops = &sh_sir_ndo; ndev->irq = irq; - self->membase = base; self->ndev = ndev; self->qos.baud_rate.bits &= IR_9600; /* FIXME */ self->qos.min_turn_time.bits = 1; /* 10 ms or more */ diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 4b2a1a9eac2..de91cd14016 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 8f7d0d146f2..6af84d88cd0 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -48,13 +48,13 @@ #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/serial_reg.h> #include <linux/dma-mapping.h> #include <linux/pnp.h> #include <linux/platform_device.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 6533c010cf5..b0a6cd815be 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -45,11 +45,11 @@ F02 Oct/28/02: Add SB device ID for 3147 and 3177. 
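/*
 * Looking back at the sh_irda probe above: it expects one memory resource,
 * one IRQ and a clock named "irda<id>". A hypothetical board-file
 * registration that would satisfy those lookups could look like the sketch
 * below; the register base, IRQ number and device id are made-up values,
 * for illustration only.
 */
static struct resource sh_irda_resources[] = {
	[0] = {
		.start	= 0xe6d00000,		/* hypothetical IrDA register base */
		.end	= 0xe6d00000 + 0x2000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 95,			/* hypothetical IrDA interrupt */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sh_irda_device = {
	.name		= "sh_irda",		/* matches DRIVER_NAME */
	.id		= 0,			/* probe will then request clock "irda0" */
	.resource	= sh_irda_resources,
	.num_resources	= ARRAY_SIZE(sh_irda_resources),
};

/* in the board setup code: */
platform_device_register(&sh_irda_device);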
#include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/pci.h> #include <linux/dma-mapping.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 209d4bcface..e1450527287 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -1742,7 +1742,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) vlsi_irda_dev_t *idev; if (!ndev) { - IRDA_ERROR("%s - %s: no netdevice \n", + IRDA_ERROR("%s - %s: no netdevice\n", __func__, pci_name(pdev)); return 0; } @@ -1781,7 +1781,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev) vlsi_irda_dev_t *idev; if (!ndev) { - IRDA_ERROR("%s - %s: no netdevice \n", + IRDA_ERROR("%s - %s: no netdevice\n", __func__, pci_name(pdev)); return 0; } diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 551810fd297..cb0cb758be6 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -46,10 +46,10 @@ #include <linux/netdevice.h> #include <linux/ioport.h> #include <linux/delay.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/dma-mapping.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> @@ -65,7 +65,6 @@ #undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */ #define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */ #endif -#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */ #define CONFIG_USE_W977_PNP /* Currently needed */ #define PIO_MAX_SPEED 115200 @@ -533,25 +532,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, self->tx_buff.len = skb->len; mtt = irda_get_mtt(skb); -#ifdef CONFIG_USE_INTERNAL_TIMER - if (mtt > 50) { - /* Adjust for timer resolution */ - mtt /= 1000+1; - - /* Setup timer */ - switch_bank(iobase, SET4); - outb(mtt & 0xff, iobase+TMRL); - outb((mtt >> 8) & 0x0f, iobase+TMRH); - - /* Start timer */ - outb(IR_MSL_EN_TMR, iobase+IR_MSL); - self->io.direction = IO_XMIT; - - /* Enable timer interrupt */ - switch_bank(iobase, SET0); - outb(ICR_ETMRI, iobase+ICR); - } else { -#endif IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); if (mtt) udelay(mtt); @@ -560,9 +540,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, switch_bank(iobase, SET0); outb(ICR_EDMAI, iobase+ICR); w83977af_dma_write(self, iobase); -#ifdef CONFIG_USE_INTERNAL_TIMER - } -#endif } else { self->tx_buff.data = self->tx_buff.head; self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, @@ -876,20 +853,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self) /* Check if we have transferred all data to memory */ switch_bank(iobase, SET0); if (inb(iobase+USR) & USR_RDR) { -#ifdef CONFIG_USE_INTERNAL_TIMER - /* Put this entry back in fifo */ - st_fifo->head--; - st_fifo->len++; - st_fifo->entries[st_fifo->head].status = status; - st_fifo->entries[st_fifo->head].len = len; - - /* Restore set register */ - outb(set, iobase+SSR); - - return FALSE; /* I'll be back! */ -#else udelay(80); /* Should be enough!? 
*/ -#endif } skb = dev_alloc_skb(len+1); diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 966de5d6952..ba1de5973fb 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -69,6 +69,7 @@ #include <linux/mm.h> #include <linux/ethtool.h> #include <linux/if_ether.h> +#include <linux/slab.h> #include <asm/abs_addr.h> #include <asm/iseries/mf.h> @@ -384,7 +385,7 @@ static struct attribute *veth_cnx_default_attrs[] = { NULL }; -static struct sysfs_ops veth_cnx_sysfs_ops = { +static const struct sysfs_ops veth_cnx_sysfs_ops = { .show = veth_cnx_attribute_show }; @@ -441,7 +442,7 @@ static struct attribute *veth_port_default_attrs[] = { NULL }; -static struct sysfs_ops veth_port_sysfs_ops = { +static const struct sysfs_ops veth_port_sysfs_ops = { .show = veth_port_attribute_show }; @@ -961,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > VETH_MAX_MCAST)) { port->promiscuous = 1; } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; port->promiscuous = 0; /* Update table */ port->num_mcast = 0; - netdev_for_each_mc_addr(dmi, dev) { - u8 *addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + u8 *addr = ha->addr; u64 xaddr = 0; if (addr[0] & 0x01) {/* multicast address? */ diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c9fef65cb98..912dd1d5772 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1058,7 +1058,7 @@ ixgb_set_multi(struct net_device *netdev) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; int i; @@ -1089,9 +1089,9 @@ ixgb_set_multi(struct net_device *netdev) IXGB_WRITE_REG(hw, RCTL, rctl); i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) + netdev_for_each_mc_addr(ha, netdev) memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], - mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); + ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS); ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); } diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 19e94ee155a..79c35ae3718 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FDIR_INDICES 64 #ifdef IXGBE_FCOE #define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES +#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature { int indices; int mask; } ____cacheline_internodealigned_in_smp; -#define MAX_RX_QUEUES 128 -#define MAX_TX_QUEUES 128 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ? 
8 : 1) diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 1f30e163bd9..f894bb63304 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -39,6 +39,7 @@ #define IXGBE_82599_MC_TBL_SIZE 128 #define IXGBE_82599_VFT_TBL_SIZE 128 +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, @@ -68,7 +69,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; } else { + mac->ops.flap_tx_laser = NULL; if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || @@ -413,6 +416,41 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, } /** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + **/ +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); + + if (hw->mac.autotry_restart) { + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + udelay(100); + + /* Enable tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msleep(100); + + hw->mac.autotry_restart = false; + } +} + +/** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -440,16 +478,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, speed &= phy_link_speed; /* - * When the driver changes the link speeds that it can support, - * it sets autotry_restart to true to indicate that we need to - * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the tx laser, to - * alert the link partner that it also needs to restart autotry on its - * end. This is consistent with true clause 37 autoneg, which also - * involves a loss of signal. - */ - - /* * Try each speed one by one, highest priority first. We do this in * software because 10gb fiber doesn't support speed autonegotiation. 
*/ @@ -466,6 +494,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, /* Set the module link speed */ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); /* Allow module to change analog characteristics (1G->10G) */ msleep(40); @@ -478,19 +507,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, return status; /* Flap the tx laser if it has not already been done */ - if (hw->mac.autotry_restart) { - /* Disable tx laser; allow 100us to go dark per spec */ - esdp_reg |= IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - udelay(100); - - /* Enable tx laser; allow 2ms to light up per spec */ - esdp_reg &= ~IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - msleep(2); - - hw->mac.autotry_restart = false; - } + hw->mac.ops.flap_tx_laser(hw); /* * Wait for the controller to acquire link. Per IEEE 802.3ap, @@ -525,6 +542,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); /* Allow module to change analog characteristics (10G->1G) */ msleep(40); @@ -537,19 +555,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, return status; /* Flap the tx laser if it has not already been done */ - if (hw->mac.autotry_restart) { - /* Disable tx laser; allow 100us to go dark per spec */ - esdp_reg |= IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - udelay(100); - - /* Enable tx laser; allow 2ms to light up per spec */ - esdp_reg &= ~IXGBE_ESDP_SDP3; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - msleep(2); - - hw->mac.autotry_restart = false; - } + hw->mac.ops.flap_tx_laser(hw); /* Wait for the link partner to also set speed */ msleep(100); @@ -1263,7 +1269,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) } if (i >= IXGBE_FDIRCMD_CMD_POLL) { hw_dbg(hw ,"Flow Director previous command isn't complete, " - "aborting table re-initialization. \n"); + "aborting table re-initialization.\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index eb49020903c..6eb5814ca7d 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1484,26 +1484,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) /** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure - * @mc_addr_list: the list of new multicast addresses - * @mc_addr_count: number of addresses - * @next: iterator function to walk the multicast address list + * @netdev: pointer to net device structure * * The given list replaces any existing list. Clears the MC addrs from receive * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next) +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev) { + struct netdev_hw_addr *ha; u32 i; - u32 vmdq; /* * Set the new number of MC addresses that we are being requested to * use. 
*/ - hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); hw->addr_ctrl.mta_in_use = 0; /* Clear the MTA */ @@ -1512,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* Add the new addresses */ - for (i = 0; i < mc_addr_count; i++) { + netdev_for_each_mc_addr(ha, netdev) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + ixgbe_set_mta(hw, ha->addr); } /* Enable mta */ diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 13606d4809c..264eef575cd 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -56,9 +56,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, - ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev); s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 7949a446e4c..8f461d5cee7 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -29,6 +29,7 @@ #include <linux/types.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> @@ -1853,6 +1854,26 @@ static void ixgbe_diag_test(struct net_device *netdev, if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn(netdev, "%s", + "offline diagnostic is not " + "supported when VFs are " + "present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__IXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + if (if_running) /* indicate we're in test mode */ dev_close(netdev); @@ -1908,6 +1929,7 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); } +skip_ol_tests: msleep_interruptible(4 * 1000); } diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 4123dec0dfb..6493049b663 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -31,6 +31,7 @@ #include "ixgbe_dcb_82599.h" #endif /* CONFIG_IXGBE_DCB */ #include <linux/if_ether.h> +#include <linux/gfp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/fc/fc_fs.h> @@ -202,6 +203,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, addr = sg_dma_address(sg); len = sg_dma_len(sg); while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= IXGBE_BUFFCNT_MAX) { + netif_err(adapter, drv, adapter->netdev, + "xid=%x:%d,%d,%d:addr=%llx " + "not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + /* get the offset of length of current buffer */ thisoff = addr & ((dma_addr_t)bufflen - 1); thislen = min((bufflen - thisoff), len); @@ -227,20 +237,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, len -= thislen; addr += thislen; 
j++; - /* max number of buffers allowed in one DDP context */ - if (j > IXGBE_BUFFCNT_MAX) { - DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx " - "not enough descriptors\n", - xid, i, j, dmacount, (u64)addr); - goto out_noddp_free; - } } } /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); - fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT); + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); fcbuff |= (IXGBE_FCBUFF_VALID); @@ -520,6 +523,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) /* Enable L2 eth type filter for FCoE */ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); + /* Enable L2 eth type filter for FIP */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), + (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); if (adapter->ring_feature[RING_F_FCOE].indices) { /* Use multiple rx queues for FCoE by redirection table */ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { @@ -530,6 +536,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + fcoe_i = f->mask; + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); } else { /* Use single rx queue for FCoE */ fcoe_i = f->mask; @@ -539,6 +551,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); } + /* send FIP frames to the first FCoE queue */ + fcoe_i = f->mask; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCOELLI | @@ -614,9 +632,9 @@ int ixgbe_fcoe_enable(struct net_device *netdev) netdev->vlan_features |= NETIF_F_FSO; netdev->vlan_features |= NETIF_F_FCOE_MTU; netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; - netdev_features_change(netdev); ixgbe_init_interrupt_scheme(adapter); + netdev_features_change(netdev); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -660,11 +678,11 @@ int ixgbe_fcoe_disable(struct net_device *netdev) netdev->vlan_features &= ~NETIF_F_FSO; netdev->vlan_features &= ~NETIF_F_FCOE_MTU; netdev->fcoe_ddp_xid = 0; - netdev_features_change(netdev); ixgbe_cleanup_fcoe(adapter); - ixgbe_init_interrupt_scheme(adapter); + netdev_features_change(netdev); + if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); rc = 0; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 45e3532b166..a98ff0e76e8 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -36,6 +36,7 @@ #include <linux/tcp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> @@ -935,10 +936,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (skb->prev) skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - if (IXGBE_RSC_CB(skb)->dma) + if (IXGBE_RSC_CB(skb)->dma) { pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); + IXGBE_RSC_CB(skb)->dma = 0; + 
} if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; else @@ -1050,7 +1053,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_bit(...) */ + /* XXX for_each_set_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); @@ -2479,12 +2482,74 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); } +/** + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + vlnctrl |= IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + static void ixgbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 ctrl; - int i, j; if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_disable(adapter); @@ -2495,25 +2560,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, * still receive traffic from a DCB-enabled host even if we're * not in DCB mode. 
*/ - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); - - /* Disable CFI check */ - ctrl &= ~IXGBE_VLNCTRL_CFIEN; - - /* enable VLAN tag stripping */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - ctrl |= IXGBE_VLNCTRL_VME; - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - for (i = 0; i < adapter->num_rx_queues; i++) { - u32 ctrl; - j = adapter->rx_ring[i]->reg_idx; - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); - } - } - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); + ixgbe_vlan_filter_enable(adapter); ixgbe_vlan_rx_add_vid(netdev, 0); @@ -2535,21 +2582,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) } } -static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) -{ - struct dev_mc_list *mc_ptr; - u8 *addr = *mc_addr_ptr; - *vmdq = 0; - - mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); - if (mc_ptr->next) - *mc_addr_ptr = mc_ptr->next->dmi_addr; - else - *mc_addr_ptr = NULL; - - return addr; -} - /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -2563,19 +2595,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u32 fctrl, vlnctrl; - u8 *addr_list = NULL; - int addr_count = 0; + u32 fctrl; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = 1; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vlnctrl &= ~IXGBE_VLNCTRL_VFE; + /* don't hardware filter vlans in promisc mode */ + ixgbe_vlan_filter_disable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; @@ -2583,22 +2613,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } else { fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); } - vlnctrl |= IXGBE_VLNCTRL_VFE; + ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = 0; } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); /* reprogram secondary unicast list */ hw->mac.ops.update_uc_addr_list(hw, netdev); /* reprogram multicast list */ - addr_count = netdev_mc_count(netdev); - if (addr_count) - addr_list = netdev->mc_list->dmi_addr; - hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, - ixgbe_addr_list_itr); + hw->mac.ops.update_mc_addr_list(hw, netdev); + if (adapter->num_vfs) ixgbe_restore_vf_multicasts(adapter); } @@ -2658,7 +2684,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 txdctl, vlnctrl; + u32 txdctl; int i, j; ixgbe_dcb_check_config(&adapter->dcb_cfg); @@ -2676,22 +2702,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); } /* Enable VLAN tag insert/strip */ - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - if (hw->mac.type == ixgbe_mac_82598EB) { - vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - } else if (hw->mac.type == ixgbe_mac_82599EB) { - vlnctrl |= IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - for (i = 0; i < adapter->num_rx_queues; i++) { - j = 
adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); - vlnctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); - } - } + ixgbe_vlan_filter_enable(adapter); + hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); } @@ -3054,6 +3066,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); ixgbe_up(adapter); clear_bit(__IXGBE_RESETTING, &adapter->state); } @@ -3126,10 +3146,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info->skb = NULL; do { struct sk_buff *this = skb; - if (IXGBE_RSC_CB(this)->dma) + if (IXGBE_RSC_CB(this)->dma) { pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); + IXGBE_RSC_CB(this)->dma = 0; + } skb = skb->prev; dev_kfree_skb(this); } while (skb); @@ -3232,13 +3254,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* disable receive for all VFs and wait one second */ if (adapter->num_vfs) { - for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = 0; - /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); + /* Disable all VFTE/VFRE TX/RX */ ixgbe_disable_tx_rx(adapter); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; } /* disable receives */ @@ -3456,12 +3480,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues = 1; #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); + DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); ixgbe_set_dcb_queues(adapter); } #endif if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); + DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ixgbe_set_fdir_queues(adapter); @@ -5018,6 +5042,7 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) autoneg = hw->phy.autoneg_advertised; if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + hw->mac.autotry_restart = false; if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, autoneg, negotiation, true); adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -5076,7 +5101,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) &(adapter->tx_ring[i]->reinit_state)); } else { DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " - "ignored adding FDIR ATR filters \n"); + "ignored adding FDIR ATR filters\n"); } /* Done FDIR Re-initialization, enable transmits */ netif_tx_start_all_queues(adapter->netdev); @@ -5633,7 +5658,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) #ifdef IXGBE_FCOE if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (skb->protocol == htons(ETH_P_FCOE))) { + ((skb->protocol == htons(ETH_P_FCOE)) || + (skb->protocol == htons(ETH_P_FIP)))) { txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); txq += adapter->ring_feature[RING_F_FCOE].mask; return txq; @@ -5680,18 +5706,25 @@ 
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, tx_ring = adapter->tx_ring[skb->queue_mapping]; - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (skb->protocol == htons(ETH_P_FCOE))) { - tx_flags |= IXGBE_TX_FLAGS_FCOE; #ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { #ifdef CONFIG_IXGBE_DCB - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); -#endif + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if ((skb->protocol == htons(ETH_P_FCOE)) || + (skb->protocol == htons(ETH_P_FIP))) { + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK + << IXGBE_TX_FLAGS_VLAN_SHIFT); + tx_flags |= ((adapter->fcoe.up << 13) + << IXGBE_TX_FLAGS_VLAN_SHIFT); + } #endif + /* flag for FCoE offloads */ + if (skb->protocol == htons(ETH_P_FCOE)) + tx_flags |= IXGBE_TX_FLAGS_FCOE; } +#endif + /* four things can cause us to need a context descriptor */ if (skb_is_gso(skb) || (skb->ip_summed == CHECKSUM_PARTIAL) || @@ -6046,7 +6079,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, indices += min_t(unsigned int, num_possible_cpus(), IXGBE_MAX_FCOE_INDICES); #endif - indices = min_t(unsigned int, indices, MAX_TX_QUEUES); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; @@ -6245,9 +6277,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, case IXGBE_DEV_ID_82599_KX4: adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | IXGBE_WUFC_MC | IXGBE_WUFC_BC); - /* Enable ACPI wakeup in GRC */ - IXGBE_WRITE_REG(hw, IXGBE_GRC, - (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME)); break; default: adapter->wol = 0; @@ -6380,6 +6409,16 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) del_timer_sync(&adapter->sfp_timer); cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->sfp_task); + if (adapter->hw.phy.multispeed_fiber) { + struct ixgbe_hw *hw = &adapter->hw; + /* + * Restart clause 37 autoneg, disable and re-enable + * the tx laser, to clear & alert the link partner + * that it needs to restart autotry + */ + hw->mac.autotry_restart = true; + hw->mac.ops.flap_tx_laser(hw); + } cancel_work_sync(&adapter->multispeed_fiber_task); cancel_work_sync(&adapter->sfp_config_module_task); if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 1c1efd38695..d6d5b843d62 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) msleep(edata); break; case IXGBE_DATA_NL: - hw_dbg(hw, "DATA: \n"); + hw_dbg(hw, "DATA:\n"); data_offset++; hw->eeprom.ops.read(hw, data_offset++, &phy_offset); @@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) break; case IXGBE_CONTROL_NL: data_offset++; - hw_dbg(hw, "CONTROL: \n"); + hw_dbg(hw, "CONTROL:\n"); if (edata == IXGBE_CONTROL_EOL_NL) { hw_dbg(hw, "EOL\n"); end_data = true; diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 2be90746659..aed4ed66564 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1298,6 +1298,7 @@ #define IXGBE_ETQF_FILTER_BCN 1 #define IXGBE_ETQF_FILTER_FCOE 2 #define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ @@ -2397,6 +2398,7 @@ struct 
ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); /* Link */ + void (*flap_tx_laser)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, @@ -2415,8 +2417,7 @@ struct ixgbe_mac_operations { s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, - ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c index 399be0c34c3..4680b069b84 100644 --- a/drivers/net/ixgbevf/ethtool.c +++ b/drivers/net/ixgbevf/ethtool.c @@ -29,6 +29,7 @@ #include <linux/types.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> @@ -46,22 +47,32 @@ struct ixgbe_stats { int sizeof_stat; int stat_offset; int base_stat_offset; + int saved_reset_offset; }; -#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \ - offsetof(struct ixgbevf_adapter, m), \ - offsetof(struct ixgbevf_adapter, b) +#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ + offsetof(struct ixgbevf_adapter, m), \ + offsetof(struct ixgbevf_adapter, b), \ + offsetof(struct ixgbevf_adapter, r) static struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)}, - {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)}, - {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)}, - {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)}, - {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)}, - {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)}, - {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)}, - {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)}, - {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)}, - {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)}, + {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, + stats.saved_reset_vfgprc)}, + {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, + stats.saved_reset_vfgptc)}, + {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, + stats.saved_reset_vfgorc)}, + {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, + stats.saved_reset_vfgotc)}, + {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, + {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, + stats.saved_reset_vfmprc)}, + {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, + zero_base)}, + {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, + zero_base)}, + {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, + zero_base)}, + {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, }; #define IXGBE_QUEUE_STATS_LEN 0 @@ -455,10 +466,14 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, ixgbe_gstrings_stats[i].stat_offset; char *b = (char *)adapter + ixgbe_gstrings_stats[i].base_stat_offset; + char *r = (char *)adapter + + ixgbe_gstrings_stats[i].saved_reset_offset; data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p) - ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)b : *(u32 *)b); + sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + + ((ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)r : *(u32 *)r); } } diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 235b5fd4b8d..f484161418b 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -39,6 +39,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> @@ -603,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * packets not getting split correctly */ if (staterr & IXGBE_RXD_STAT_LB) { - u32 header_fixup_len = skb->len - skb->data_len; + u32 header_fixup_len = skb_headlen(skb); if (header_fixup_len < 14) skb_push(skb, header_fixup_len); } skb->protocol = eth_type_trans(skb, adapter->netdev); ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); - adapter->netdev->last_rx = jiffies; next_desc: rx_desc->wb.upper.status_error = 0; @@ -751,7 +751,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_bit(...) */ + /* XXX for_each_set_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); @@ -965,7 +965,7 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 10)); + round_jiffies(jiffies + 1)); return IRQ_HANDLED; } @@ -1495,22 +1495,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) } } -static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, - u32 *vmdq) -{ - struct dev_mc_list *mc_ptr; - u8 *addr = *mc_addr_ptr; - *vmdq = 0; - - mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); - if (mc_ptr->next) - *mc_addr_ptr = mc_ptr->next->dmi_addr; - else - *mc_addr_ptr = NULL; - - return addr; -} - /** * ixgbevf_set_rx_mode - Multicast set * @netdev: network interface device structure @@ -1523,16 +1507,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u8 *addr_list = NULL; - int addr_count = 0; /* reprogram multicast list */ - addr_count = netdev_mc_count(netdev); - if (addr_count) - addr_list = netdev->mc_list->dmi_addr; if (hw->mac.ops.update_mc_addr_list) - hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, - ixgbevf_addr_list_itr); + hw->mac.ops.update_mc_addr_list(hw, netdev); } static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) @@ -1610,6 +1588,44 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, (adapter->rx_ring[rxr].count - 1)); } +static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) +{ + /* Only save pre-reset stats if there are some */ + if (adapter->stats.vfgprc || adapter->stats.vfgptc) { + adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - + adapter->stats.base_vfgprc; + adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - + adapter->stats.base_vfgptc; + adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - + adapter->stats.base_vfgorc; + adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - + adapter->stats.base_vfgotc; + 
adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - + adapter->stats.base_vfmprc; + } +} + +static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); + adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); + adapter->stats.last_vfgorc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); + adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); + adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); + adapter->stats.last_vfgotc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); + adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); + + adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; + adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; + adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; + adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; +} + static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1656,6 +1672,9 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(netdev); + ixgbevf_save_reset_stats(adapter); + ixgbevf_init_last_counter_stats(adapter); + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -2228,27 +2247,6 @@ out: return err; } -static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); - adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); - adapter->stats.last_vfgorc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); - adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); - adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); - adapter->stats.last_vfgotc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); - adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); - - adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; - adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; - adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; - adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; - adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; -} - #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ { \ u32 current_counter = IXGBE_READ_REG(hw, reg); \ @@ -2397,9 +2395,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work) if (link_up) { if (!netif_carrier_ok(netdev)) { - hw_dbg(&adapter->hw, "NIC Link is Up %s, ", - ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? - "10 Gbps" : "1 Gbps")); + hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 
+ 10 : 1); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); } else { @@ -2416,9 +2414,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work) } } -pf_has_reset: ixgbevf_update_stats(adapter); +pf_has_reset: /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = true; @@ -2675,7 +2673,7 @@ static int ixgbevf_open(struct net_device *netdev) if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; printk(KERN_ERR "Unable to start - perhaps the PF" - "Driver isn't up yet\n"); + " Driver isn't up yet\n"); goto err_setup_reset; } } @@ -2923,9 +2921,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; - unsigned int offset = 0, size, count = 0, i; + unsigned int offset = 0, size, count = 0; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; + int i; i = tx_ring->next_to_use; @@ -3390,8 +3389,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, /* setup the private structure */ err = ixgbevf_sw_init(adapter); - ixgbevf_init_last_counter_stats(adapter); - #ifdef MAX_SKB_FRAGS netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -3449,6 +3446,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, adapter->netdev_registered = true; + ixgbevf_init_last_counter_stats(adapter); + /* print the MAC address */ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", netdev->dev_addr[0], @@ -3460,7 +3459,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, hw_dbg(hw, "MAC: %d\n", hw->mac.type); - hw_dbg(hw, "LRO is disabled \n"); + hw_dbg(hw, "LRO is disabled\n"); hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); cards_found++; diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c index 4b5dec0ec14..852e9c4fd93 100644 --- a/drivers/net/ixgbevf/vf.c +++ b/drivers/net/ixgbevf/vf.c @@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, /** * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * @next: caller supplied function to return next address in list + * @netdev: pointer to net device structure * * Updates the Multicast Table Array. **/ -static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, - ixgbe_mc_addr_itr next) +static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, + struct net_device *netdev) { + struct netdev_hw_addr *ha; struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u16 *vector_list = (u16 *)&msgbuf[1]; - u32 vector; u32 cnt, i; - u32 vmdq; /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the @@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, * addresses except for in large enterprise network environments. */ - cnt = (mc_addr_count > 30) ? 
30 : mc_addr_count; + cnt = netdev_mc_count(netdev); + if (cnt > 30) + cnt = 30; msgbuf[0] = IXGBE_VF_SET_MULTICAST; msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; - for (i = 0; i < cnt; i++) { - vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); - vector_list[i] = vector; + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == cnt) + break; + vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h index 799600e9270..94b750b8874 100644 --- a/drivers/net/ixgbevf/vf.h +++ b/drivers/net/ixgbevf/vf.h @@ -32,6 +32,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/if_ether.h> +#include <linux/netdevice.h> #include "defines.h" #include "regs.h" @@ -62,8 +63,7 @@ struct ixgbe_mac_operations { /* RAR, Multicast, VLAN */ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, - ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); @@ -157,6 +157,12 @@ struct ixgbevf_hw_stats { u64 vfgorc; u64 vfgotc; u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; }; struct ixgbevf_info { diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index e9d9d595e1b..d5932ca3e27 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -15,6 +15,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> +#include <linux/gfp.h> #include <asm/hardware/uengine.h> #include <asm/io.h> #include "ixp2400_rx.ucode" diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index f47d4d663b1..3e6aaf9e5ce 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -22,11 +22,11 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> +#include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> @@ -35,6 +35,7 @@ #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <asm/bootinfo.h> #include <asm/system.h> diff --git a/drivers/net/jme.c b/drivers/net/jme.c index 0f31497833d..4e868eeac89 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -37,6 +37,7 @@ #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_vlan.h> +#include <linux/slab.h> #include <net/ip6_checksum.h> #include "jme.h" @@ -946,6 +947,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) jme->jme_vlan_rx(skb, jme->vlgrp, le16_to_cpu(rxdesc->descwb.vlan)); NET_STAT(jme).rx_bytes += 4; + } else { + dev_kfree_skb(skb); } } else { jme->jme_rx(skb); @@ -2007,12 +2010,12 @@ jme_set_multi(struct net_device *netdev) } else if (netdev->flags & IFF_ALLMULTI) { jme->reg_rxmcs |= RXMCS_ALLMULFRAME; } else if (netdev->flags & IFF_MULTICAST) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int bit_nr; jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; - netdev_for_each_mc_addr(mclist, netdev) { - bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = 
ether_crc(ETH_ALEN, ha->addr) & 0x3F; mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); } @@ -2081,12 +2084,45 @@ jme_tx_timeout(struct net_device *netdev) jme_reset_link(jme); } +static inline void jme_pause_rx(struct jme_adapter *jme) +{ + atomic_dec(&jme->link_changing); + + jme_set_rx_pcc(jme, PCC_OFF); + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_DISABLE(jme); + } else { + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + } +} + +static inline void jme_resume_rx(struct jme_adapter *jme) +{ + struct dynpcc_info *dpi = &(jme->dpi); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_ENABLE(jme); + } else { + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + } + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + + atomic_inc(&jme->link_changing); +} + static void jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct jme_adapter *jme = netdev_priv(netdev); + jme_pause_rx(jme); jme->vlgrp = grp; + jme_resume_rx(jme); } static void diff --git a/drivers/net/jme.h b/drivers/net/jme.h index c19db9146a2..07ad3a45718 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h @@ -25,7 +25,7 @@ #define __JME_H_INCLUDED__ #define DRV_NAME "jme" -#define DRV_VERSION "1.0.5" +#define DRV_VERSION "1.0.6" #define PFX DRV_NAME ": " #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 diff --git a/drivers/net/korina.c b/drivers/net/korina.c index 300c2249812..26bf1b76b99 100644 --- a/drivers/net/korina.c +++ b/drivers/net/korina.c @@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); unsigned long flags; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ int i; @@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; @@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); dev->base_addr = r->start; - lp->eth_regs = ioremap_nocache(r->start, r->end - r->start); + lp->eth_regs = ioremap_nocache(r->start, resource_size(r)); if (!lp->eth_regs) { printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); rc = -ENXIO; @@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); - lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start); + lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r)); if (!lp->rx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); rc = -ENXIO; @@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); - lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start); + lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r)); if (!lp->tx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); rc = -ENXIO; diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index 5c45cb58d02..b91492f4e48 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c @@ -20,6 +20,8 @@ * The Micrel KS8842 behind the timberdale FPGA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include 
<linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -525,8 +527,7 @@ static int ks8842_open(struct net_device *netdev) err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, adapter); if (err) { - printk(KERN_ERR "Failed to request IRQ: %d: %d\n", - adapter->irq, err); + pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); return err; } @@ -668,8 +669,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev) platform_set_drvdata(pdev, netdev); - printk(KERN_INFO DRV_NAME - " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", + pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); return 0; diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index b5219cce12e..4dcd61f81ec 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -9,6 +9,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #define DEBUG #include <linux/module.h> @@ -125,11 +127,6 @@ struct ks8851_net { static int msg_enable; -#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg) -#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg) -#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg) -#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg) - /* shift for byte-enable data */ #define BYTE_EN(_x) ((_x) << 2) @@ -167,7 +164,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) ret = spi_sync(ks->spidev, msg); if (ret < 0) - ks_err(ks, "spi_sync() failed\n"); + netdev_err(ks->netdev, "spi_sync() failed\n"); } /** @@ -197,7 +194,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) ret = spi_sync(ks->spidev, msg); if (ret < 0) - ks_err(ks, "spi_sync() failed\n"); + netdev_err(ks->netdev, "spi_sync() failed\n"); } /** @@ -263,7 +260,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, ret = spi_sync(ks->spidev, msg); if (ret < 0) - ks_err(ks, "read: spi_sync() failed\n"); + netdev_err(ks->netdev, "read: spi_sync() failed\n"); else if (ks8851_rx_1msg(ks)) memcpy(rxb, trx + 2, rxl); else @@ -407,7 +404,7 @@ static irqreturn_t ks8851_irq(int irq, void *pw) * @buff: The buffer address * @len: The length of the data to read * - * Issue an RXQ FIFO read command and read the @len ammount of data from + * Issue an RXQ FIFO read command and read the @len amount of data from * the FIFO into the buffer specified by @buff. 
*/ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) @@ -417,8 +414,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) u8 txb[1]; int ret; - if (netif_msg_rx_status(ks)) - ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff); + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d@%p\n", __func__, len, buff); /* set the operation we're issuing */ txb[0] = KS_SPIOP_RXFIFO; @@ -434,7 +431,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) ret = spi_sync(ks->spidev, msg); if (ret < 0) - ks_err(ks, "%s: spi_sync() failed\n", __func__); + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); } /** @@ -446,10 +443,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) */ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) { - ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", - rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], - rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], - rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); + netdev_dbg(ks->netdev, + "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", + rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], + rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], + rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); } /** @@ -471,8 +469,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) rxfc = ks8851_rdreg8(ks, KS_RXFC); - if (netif_msg_rx_status(ks)) - ks_dbg(ks, "%s: %d packets\n", __func__, rxfc); + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d packets\n", __func__, rxfc); /* Currently we're issuing a read per packet, but we could possibly * improve the code by issuing a single read, getting the receive @@ -489,9 +487,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) rxstat = rxh & 0xffff; rxlen = rxh >> 16; - if (netif_msg_rx_status(ks)) - ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n", - rxstat, rxlen); + netif_dbg(ks, rx_status, ks->netdev, + "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); /* the length of the packet includes the 32bit CRC */ @@ -553,9 +550,8 @@ static void ks8851_irq_work(struct work_struct *work) status = ks8851_rdreg16(ks, KS_ISR); - if (netif_msg_intr(ks)) - dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n", - __func__, status); + netif_dbg(ks, intr, ks->netdev, + "%s: status 0x%04x\n", __func__, status); if (status & IRQ_LCI) { /* should do something about checking link status */ @@ -582,8 +578,8 @@ static void ks8851_irq_work(struct work_struct *work) * system */ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); - if (netif_msg_intr(ks)) - ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space); + netif_dbg(ks, intr, ks->netdev, + "%s: txspace %d\n", __func__, ks->tx_space); } if (status & IRQ_RXI) @@ -659,9 +655,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) unsigned fid = 0; int ret; - if (netif_msg_tx_queued(ks)) - dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n", - __func__, txp, txp->len, txp->data, irq); + netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", + __func__, txp, txp->len, txp->data, irq); fid = ks->fid++; fid &= TXFR_TXFID_MASK; @@ -685,7 +680,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) ret = spi_sync(ks->spidev, msg); if (ret < 0) - ks_err(ks, "%s: spi_sync() failed\n", __func__); + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); } /** @@ -744,8 +739,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) { unsigned pmecr; - if (netif_msg_hw(ks)) - ks_dbg(ks, "setting power mode 
%d\n", pwrmode); + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); pmecr = ks8851_rdreg16(ks, KS_PMECR); pmecr &= ~PMECR_PM_MASK; @@ -769,8 +763,7 @@ static int ks8851_net_open(struct net_device *dev) * else at the moment */ mutex_lock(&ks->lock); - if (netif_msg_ifup(ks)) - ks_dbg(ks, "opening %s\n", dev->name); + netif_dbg(ks, ifup, ks->netdev, "opening\n"); /* bring chip out of any power saving mode it was in */ ks8851_set_powermode(ks, PMECR_PM_NORMAL); @@ -826,8 +819,7 @@ static int ks8851_net_open(struct net_device *dev) netif_start_queue(ks->netdev); - if (netif_msg_ifup(ks)) - ks_dbg(ks, "network device %s up\n", dev->name); + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); mutex_unlock(&ks->lock); return 0; @@ -845,8 +837,7 @@ static int ks8851_net_stop(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); - if (netif_msg_ifdown(ks)) - ks_info(ks, "%s: shutting down\n", dev->name); + netif_info(ks, ifdown, dev, "shutting down\n"); netif_stop_queue(dev); @@ -874,8 +865,8 @@ static int ks8851_net_stop(struct net_device *dev) while (!skb_queue_empty(&ks->txq)) { struct sk_buff *txb = skb_dequeue(&ks->txq); - if (netif_msg_ifdown(ks)) - ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb); + netif_dbg(ks, ifdown, ks->netdev, + "%s: freeing txb %p\n", __func__, txb); dev_kfree_skb(txb); } @@ -904,9 +895,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, unsigned needed = calc_txlen(skb->len); netdev_tx_t ret = NETDEV_TX_OK; - if (netif_msg_tx_queued(ks)) - ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__, - skb, skb->len, skb->data); + netif_dbg(ks, tx_queued, ks->netdev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); spin_lock(&ks->statelock); @@ -966,17 +956,16 @@ static void ks8851_set_rx_mode(struct net_device *dev) rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA | RXCR1_RXMAFMA); } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u32 crc; /* accept some multicast */ - netdev_for_each_mc_addr(mcptr, dev) { - crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); crc >>= (32 - 6); /* get top six bits */ rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); - mcptr = mcptr->next; } rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; @@ -1186,17 +1175,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks) rd = ks8851_rdreg16(ks, KS_MBIR); if ((rd & both_done) != both_done) { - ks_warn(ks, "Memory selftest not finished\n"); + netdev_warn(ks->netdev, "Memory selftest not finished\n"); return 0; } if (rd & MBIR_TXMBFA) { - ks_err(ks, "TX memory selftest fail\n"); + netdev_err(ks->netdev, "TX memory selftest fail\n"); ret |= 1; } if (rd & MBIR_RXMBFA) { - ks_err(ks, "RX memory selftest fail\n"); + netdev_err(ks->netdev, "RX memory selftest fail\n"); ret |= 2; } @@ -1294,9 +1283,9 @@ static int __devinit ks8851_probe(struct spi_device *spi) goto err_netdev; } - dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n", - CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), - ndev->dev_addr, ndev->irq); + netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n", + CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), + ndev->dev_addr, ndev->irq); return 0; @@ -1315,7 +1304,7 @@ static int __devexit ks8851_remove(struct spi_device *spi) struct ks8851_net *priv = dev_get_drvdata(&spi->dev); if (netif_msg_drv(priv)) - dev_info(&spi->dev, "remove"); + dev_info(&spi->dev, "remove\n"); unregister_netdev(priv->netdev); 
free_irq(spi->irq, priv); diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c index 84b0e15831f..2e2c69b2406 100644 --- a/drivers/net/ks8851_mll.c +++ b/drivers/net/ks8851_mll.c @@ -21,6 +21,8 @@ * KS8851 16bit MLL chip from Micrel Inc. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> @@ -31,6 +33,7 @@ #include <linux/mii.h> #include <linux/platform_device.h> #include <linux/delay.h> +#include <linux/slab.h> #define DRV_NAME "ks8851_mll" @@ -360,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; #define MAX_MCAST_LST 32 #define HW_MCAST_SIZE 8 -#define MAC_ADDR_LEN 6 /** * union ks_tx_hdr - tx header data @@ -448,7 +450,7 @@ struct ks_net { u16 promiscuous; u16 all_mcast; u16 mcast_lst_size; - u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; + u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN]; u8 mcast_bits[HW_MCAST_SIZE]; u8 mac_addr[6]; u8 fid; @@ -458,11 +460,6 @@ struct ks_net { static int msg_enable; -#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg) -#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg) -#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg) -#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg) - #define BE3 0x8000 /* Byte Enable 3 */ #define BE2 0x4000 /* Byte Enable 2 */ #define BE1 0x2000 /* Byte Enable 1 */ @@ -624,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode) { unsigned pmecr; - if (netif_msg_hw(ks)) - ks_dbg(ks, "setting power mode %d\n", pwrmode); + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); ks_rdreg16(ks, KS_GRR); pmecr = ks_rdreg16(ks, KS_PMECR); @@ -805,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev) /* read data block including CRC 4 bytes */ ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); skb_put(skb, frame_hdr->len); - skb->dev = netdev; skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); } else { - printk(KERN_ERR "%s: err:skb alloc\n", __func__); + pr_err("%s: err:skb alloc\n", __func__); ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); if (skb) dev_kfree_skb_irq(skb); @@ -836,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) netif_carrier_off(netdev); link_up_status = false; } - if (netif_msg_link(ks)) - ks_dbg(ks, "%s: %s\n", - __func__, link_up_status ? "UP" : "DOWN"); + netif_dbg(ks, link, ks->netdev, + "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN"); } /** @@ -908,15 +902,13 @@ static int ks_net_open(struct net_device *netdev) * else at the moment. 
*/ - if (netif_msg_ifup(ks)) - ks_dbg(ks, "%s - entry\n", __func__); + netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__); /* reset the HW */ err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); if (err) { - printk(KERN_ERR "Failed to request IRQ: %d: %d\n", - ks->irq, err); + pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err); return err; } @@ -929,8 +921,7 @@ static int ks_net_open(struct net_device *netdev) ks_enable_qmu(ks); netif_start_queue(ks->netdev); - if (netif_msg_ifup(ks)) - ks_dbg(ks, "network device %s up\n", netdev->name); + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); return 0; } @@ -947,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev) { struct ks_net *ks = netdev_priv(netdev); - if (netif_msg_ifdown(ks)) - ks_info(ks, "%s: shutting down\n", netdev->name); + netif_info(ks, ifdown, netdev, "shutting down\n"); netif_stop_queue(netdev); @@ -1180,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast) static void ks_set_rx_mode(struct net_device *netdev) { struct ks_net *ks = netdev_priv(netdev); - struct dev_mc_list *ptr; + struct netdev_hw_addr *ha; /* Turn on/off promiscuous mode. */ if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) @@ -1197,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev) if (netdev_mc_count(netdev) <= MAX_MCAST_LST) { int i = 0; - netdev_for_each_mc_addr(ptr, netdev) { - if (!(*ptr->dmi_addr & 1)) + netdev_for_each_mc_addr(ha, netdev) { + if (!(*ha->addr & 1)) continue; if (i >= MAX_MCAST_LST) break; - memcpy(ks->mcast_lst[i++], ptr->dmi_addr, - MAC_ADDR_LEN); + memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN); } ks->mcast_lst_size = (u8)i; ks_set_grpaddr(ks); @@ -1429,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks) rd = ks_rdreg16(ks, KS_MBIR); if ((rd & both_done) != both_done) { - ks_warn(ks, "Memory selftest not finished\n"); + netdev_warn(ks->netdev, "Memory selftest not finished\n"); return 0; } if (rd & MBIR_TXMBFA) { - ks_err(ks, "TX memory selftest fails\n"); + netdev_err(ks->netdev, "TX memory selftest fails\n"); ret |= 1; } if (rd & MBIR_RXMBFA) { - ks_err(ks, "RX memory selftest fails\n"); + netdev_err(ks->netdev, "RX memory selftest fails\n"); ret |= 2; } - ks_info(ks, "the selftest passes\n"); + netdev_info(ks->netdev, "the selftest passes\n"); return ret; } @@ -1514,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks) ks->frame_head_info = (struct type_frame_head *) \ kmalloc(MHEADER_SIZE, GFP_KERNEL); if (!ks->frame_head_info) { - printk(KERN_ERR "Error: Fail to allocate frame memory\n"); + pr_err("Error: Fail to allocate frame memory\n"); return false; } @@ -1580,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev) ks->mii.mdio_read = ks_phy_read; ks->mii.mdio_write = ks_phy_write; - ks_info(ks, "message enable is %d\n", msg_enable); + netdev_info(netdev, "message enable is %d\n", msg_enable); /* set the default message enable */ ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | NETIF_MSG_PROBE | @@ -1589,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev) /* simple check for a valid chip being connected to the bus */ if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { - ks_err(ks, "failed to read device ID\n"); + netdev_err(netdev, "failed to read device ID\n"); err = -ENODEV; goto err_register; } if (ks_read_selftest(ks)) { - ks_err(ks, "failed to read device ID\n"); + netdev_err(netdev, "failed to read device ID\n"); err = -ENODEV; goto err_register; } @@ -1626,9 
+1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev) id = ks_rdreg16(ks, KS_CIDER); - printk(KERN_INFO DRV_NAME - " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", - (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); + netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", + (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); return 0; err_register: diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 7264a3e5c2c..cc0bc8a2608 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -14,10 +14,11 @@ * GNU General Public License for more details. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/version.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/proc_fs.h> @@ -30,6 +31,7 @@ #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/sched.h> +#include <linux/slab.h> /* DMA Registers */ @@ -1483,11 +1485,6 @@ struct dev_priv { int promiscuous; }; -#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg) -#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg) -#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg) -#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg) - #define DRV_NAME "KSZ884X PCI" #define DEVICE_NAME "KSZ884x PCI" #define DRV_VERSION "1.0.0" @@ -3834,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info) alloc >>= 1; } if (alloc != 1 || shift < MIN_DESC_SHIFT) { - printk(KERN_ALERT "Hardware descriptor numbers not right!\n"); + pr_alert("Hardware descriptor numbers not right!\n"); while (alloc) { shift++; alloc >>= 1; @@ -4545,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter) (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / DESC_ALIGNMENT) * DESC_ALIGNMENT); if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) - printk(KERN_ALERT - "Hardware descriptor size not right!\n"); + pr_alert("Hardware descriptor size not right!\n"); ksz_check_desc_num(&hw->rx_desc_info); ksz_check_desc_num(&hw->tx_desc_info); @@ -4688,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) int frag; skb_frag_t *this_frag; - dma_buf->len = skb->len - skb->data_len; + dma_buf->len = skb_headlen(skb); dma_buf->dma = pci_map_single( hw_priv->pdev, skb->data, dma_buf->len, @@ -4899,8 +4895,10 @@ static int netdev_tx(struct sk_buff *skb, struct net_device *dev) struct sk_buff *org_skb = skb; skb = dev_alloc_skb(org_skb->len); - if (!skb) - return NETDEV_TX_BUSY; + if (!skb) { + rc = NETDEV_TX_BUSY; + goto unlock; + } skb_copy_and_csum_dev(org_skb, skb->data); org_skb->ip_summed = 0; skb->len = org_skb->len; @@ -4914,7 +4912,7 @@ static int netdev_tx(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); rc = NETDEV_TX_BUSY; } - +unlock: spin_unlock_irq(&hw_priv->hwlock); return rc; @@ -5046,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, dma_buf->skb->data, packet_len); } while (0); - skb->dev = dev; - skb->protocol = eth_type_trans(skb, dev); if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) @@ -5058,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, priv->stats.rx_bytes += packet_len; /* Notify upper layer for received packet. 
*/ - dev->last_rx = jiffies; - rx_status = netif_rx(skb); return 0; @@ -5317,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id) u32 data; hw->intr_mask &= ~KS884X_INT_TX_STOPPED; - printk(KERN_INFO "Tx stopped\n"); + pr_info("Tx stopped\n"); data = readl(hw->io + KS_DMA_TX_CTRL); if (!(data & DMA_TX_ENABLE)) - printk(KERN_INFO "Tx disabled\n"); + pr_info("Tx disabled\n"); break; } } while (0); @@ -5493,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev) return 0; } +static void set_media_state(struct net_device *dev, int media_state) +{ + struct dev_priv *priv = netdev_priv(dev); + + if (media_state == priv->media_state) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + netif_info(priv, link, dev, "link %s\n", + media_state == priv->media_state ? "on" : "off"); +} + /** * netdev_open - open network device * @dev: Network device. @@ -5582,15 +5588,7 @@ static int netdev_open(struct net_device *dev) priv->media_state = port->linked->state; - if (media_connected == priv->media_state) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - if (netif_msg_link(priv)) - printk(KERN_INFO "%s link %s\n", dev->name, - (media_connected == priv->media_state ? - "on" : "off")); - + set_media_state(dev, media_connected); netif_start_queue(dev); return 0; @@ -5764,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev) struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; struct ksz_hw *hw = &hw_priv->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; int multicast = (dev->flags & IFF_ALLMULTI); dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); @@ -5781,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev) int i = 0; /* List too big to support so turn on all multicast mode. */ - if (dev->mc_count > MAX_MULTICAST_LIST) { + if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) { if (MAX_MULTICAST_LIST != hw->multi_list_size) { hw->multi_list_size = MAX_MULTICAST_LIST; ++hw->all_multi; @@ -5790,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev) return; } - netdev_for_each_mc_addr(mc_ptr, dev) { - if (!(*mc_ptr->dmi_addr & 1)) + netdev_for_each_mc_addr(ha, dev) { + if (!(*ha->addr & 1)) continue; if (i >= MAX_MULTICAST_LIST) break; - memcpy(hw->multi_list[i++], mc_ptr->dmi_addr, - MAC_ADDR_LEN); + memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN); } hw->multi_list_size = (u8) i; hw_set_grp_addr(hw); @@ -6320,7 +6317,7 @@ static int netdev_set_eeprom(struct net_device *dev, int len; if (eeprom->magic != EEPROM_MAGIC) - return 1; + return -EINVAL; len = (eeprom->offset + eeprom->len + 1) / 2; for (i = eeprom->offset / 2; i < len; i++) @@ -6680,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv, { if (priv->media_state != port->linked->state) { priv->media_state = port->linked->state; - if (netif_running(dev)) { - if (media_connected == priv->media_state) - netif_carrier_on(dev); - else - netif_carrier_off(dev); - if (netif_msg_link(priv)) - printk(KERN_INFO "%s link %s\n", dev->name, - (media_connected == priv->media_state ? 
- "on" : "off")); - } + if (netif_running(dev)) + set_media_state(dev, media_connected); } } @@ -6983,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev, int pi; int port_count; int result; - char banner[80]; + char banner[sizeof(version)]; struct ksz_switch *sw = NULL; result = pci_enable_device(pdev); @@ -7007,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev, result = -ENOMEM; - info = kmalloc(sizeof(struct platform_info), GFP_KERNEL); + info = kzalloc(sizeof(struct platform_info), GFP_KERNEL); if (!info) goto pcidev_init_dev_err; - memset(info, 0, sizeof(struct platform_info)); hw_priv = &info->dev_info; hw_priv->pdev = pdev; @@ -7024,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev, cnt = hw_init(hw); if (!cnt) { if (msg_enable & NETIF_MSG_PROBE) - printk(KERN_ALERT "chip not detected\n"); + pr_alert("chip not detected\n"); result = -ENODEV; goto pcidev_init_alloc_err; } - sprintf(banner, "%s\n", version); - banner[13] = cnt + '0'; - ks_info(hw_priv, "%s", banner); - ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); + snprintf(banner, sizeof(banner), "%s", version); + banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */ + dev_info(&hw_priv->pdev->dev, "%s\n", banner); + dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); /* Assume device is KSZ8841. */ hw->dev_count = 1; diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c index b77238dbafb..6eba352c52e 100644 --- a/drivers/net/lasi_82596.c +++ b/drivers/net/lasi_82596.c @@ -74,7 +74,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 443c39a3732..61c38ab4c24 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -73,7 +73,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> @@ -85,6 +84,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/irq.h> +#include <linux/gfp.h> /* DEBUG flags */ @@ -1388,7 +1388,7 @@ static void set_multicast_list(struct net_device *dev) } if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *cp; struct mc_cmd *cmd; @@ -1396,10 +1396,10 @@ static void set_multicast_list(struct net_device *dev) cmd->cmd.command = SWAP16(CmdMulticastList); cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); cp = cmd->mc_addrs; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!cnt--) break; - memcpy(cp, dmi->dmi_addr, 6); + memcpy(cp, ha->addr, 6); if (i596_debug > 1) DEB(DEB_MULTI, printk(KERN_DEBUG diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index 56f66f48540..526dc9cbc3c 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c @@ -905,10 +905,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev) static inline void make_mc_bits(u8 *bits, struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(dmi, dev) { - u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. 
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h index 1af66a1e691..c03358434ac 100644 --- a/drivers/net/ll_temac.h +++ b/drivers/net/ll_temac.h @@ -5,8 +5,11 @@ #include <linux/netdevice.h> #include <linux/of.h> #include <linux/spinlock.h> + +#ifdef CONFIG_PPC_DCR #include <asm/dcr.h> #include <asm/dcr-regs.h> +#endif /* packet size info */ #define XTE_HDR_SIZE 14 /* size of Ethernet header */ @@ -290,9 +293,6 @@ This option defaults to enabled (set) */ #define TX_CONTROL_CALC_CSUM_MASK 1 -#define XTE_ALIGN 32 -#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN) - #define MULTICAST_CAM_TABLE_NUM 4 /* TX/RX CURDESC_PTR points to first descriptor */ @@ -335,9 +335,15 @@ struct temac_local { struct mii_bus *mii_bus; /* MII bus reference */ int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ - /* IO registers and IRQs */ + /* IO registers, dma functions and IRQs */ void __iomem *regs; + void __iomem *sdma_regs; +#ifdef CONFIG_PPC_DCR dcr_host_t sdma_dcrs; +#endif + u32 (*dma_in)(struct temac_local *, int); + void (*dma_out)(struct temac_local *, int, u32); + int tx_irq; int rx_irq; int emac_num; diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index a18e3485476..78c9a2e6e51 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -20,9 +20,6 @@ * or rx, so this should be okay. * * TODO: - * - Fix driver to work on more than just Virtex5. Right now the driver - * assumes that the locallink DMA registers are accessed via DCR - * instructions. * - Factor out locallink DMA code into separate driver * - Fix multicast assignment. * - Fix support for hardware checksumming. @@ -49,6 +46,7 @@ #include <linux/in.h> #include <linux/io.h> #include <linux/ip.h> +#include <linux/slab.h> #include "ll_temac.h" @@ -115,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); } +/** + * temac_dma_in32 - Memory mapped DMA read, this function expects a + * register input that is based on DCR word addresses which + * are then converted to memory mapped byte addresses + */ static u32 temac_dma_in32(struct temac_local *lp, int reg) { - return dcr_read(lp->sdma_dcrs, reg); + return in_be32((u32 *)(lp->sdma_regs + (reg << 2))); } +/** + * temac_dma_out32 - Memory mapped DMA read, this function expects a + * register input that is based on DCR word addresses which + * are then converted to memory mapped byte addresses + */ static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) { + out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value); +} + +/* DMA register access functions can be DCR based or memory mapped. + * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both + * memory mapped. 
+ */ +#ifdef CONFIG_PPC_DCR + +/** + * temac_dma_dcr_in32 - DCR based DMA read + */ +static u32 temac_dma_dcr_in(struct temac_local *lp, int reg) +{ + return dcr_read(lp->sdma_dcrs, reg); +} + +/** + * temac_dma_dcr_out32 - DCR based DMA write + */ +static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value) +{ dcr_write(lp->sdma_dcrs, reg, value); } /** + * temac_dcr_setup - If the DMA is DCR based, then setup the address and + * I/O functions + */ +static int temac_dcr_setup(struct temac_local *lp, struct of_device *op, + struct device_node *np) +{ + unsigned int dcrs; + + /* setup the dcr address mapping if it's in the device tree */ + + dcrs = dcr_resource_start(np, 0); + if (dcrs != 0) { + lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); + lp->dma_in = temac_dma_dcr_in; + lp->dma_out = temac_dma_dcr_out; + dev_dbg(&op->dev, "DCR base: %x\n", dcrs); + return 0; + } + /* no DCR in the device tree, indicate a failure */ + return -1; +} + +#else + +/* + * temac_dcr_setup - This is a stub for when DCR is not supported, + * such as with MicroBlaze + */ +static int temac_dcr_setup(struct temac_local *lp, struct of_device *op, + struct device_node *np) +{ + return -1; +} + +#endif + +/** * temac_dma_bd_init - Setup buffer descriptor rings */ static int temac_dma_bd_init(struct net_device *ndev) @@ -155,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->rx_bd_v[i].next = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); - skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE - + XTE_ALIGN, GFP_ATOMIC); + skb = netdev_alloc_skb_ip_align(ndev, + XTE_MAX_JUMBO_FRAME_SIZE); + if (skb == 0) { dev_err(&ndev->dev, "alloc_skb error %d\n", i); return -1; } lp->rx_skb[i] = skb; - skb_reserve(skb, BUFFER_ALIGN(skb->data)); /* returns physical address of skb->data */ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, skb->data, @@ -172,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND; } - temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 | + lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); /* 0x10220483 */ /* 0x00100483 */ - temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 | + lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN | CHNL_CTRL_IRQ_IOE); /* 0xff010283 */ - temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p); - temac_dma_out32(lp, RX_TAILDESC_PTR, + lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); + lp->dma_out(lp, RX_TAILDESC_PTR, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); - temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p); + lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); return 0; } @@ -250,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev) temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); } else if (!netdev_mc_empty(ndev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; i = 0; - netdev_for_each_mc_addr(mclist, ndev) { + netdev_for_each_mc_addr(ha, ndev) { if (i >= MULTICAST_CAM_TABLE_NUM) break; - multi_addr_msw = ((mclist->dmi_addr[3] << 24) | - (mclist->dmi_addr[2] << 16) | - (mclist->dmi_addr[1] << 8) | - (mclist->dmi_addr[0])); + multi_addr_msw = ((ha->addr[3] << 24) | + (ha->addr[2] << 16) | + (ha->addr[1] << 8) | + (ha->addr[0])); temac_indirect_out32(lp, XTE_MAW0_OFFSET, multi_addr_msw); - multi_addr_lsw = ((mclist->dmi_addr[5] << 8) | - (mclist->dmi_addr[4]) | (i 
<< 16)); + multi_addr_lsw = ((ha->addr[5] << 8) | + (ha->addr[4]) | (i << 16)); temac_indirect_out32(lp, XTE_MAW1_OFFSET, multi_addr_lsw); i++; @@ -426,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev) temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK); /* Reset Local Link (DMA) */ - temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); + lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); timeout = 1000; - while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { + while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { udelay(1); if (--timeout == 0) { dev_err(&ndev->dev, @@ -436,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev) break; } } - temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); + lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); temac_dma_bd_init(ndev); @@ -597,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) lp->tx_bd_tail = 0; /* Kick off the transfer */ - temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ return NETDEV_TX_OK; } @@ -611,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev) struct cdmac_bd *cur_p; dma_addr_t tail_p; int length; - unsigned long skb_vaddr; unsigned long flags; spin_lock_irqsave(&lp->rx_lock, flags); @@ -625,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev) skb = lp->rx_skb[lp->rx_bd_ci]; length = cur_p->app4 & 0x3FFF; - skb_vaddr = virt_to_bus(skb->data); - dma_unmap_single(ndev->dev.parent, skb_vaddr, length, + dma_unmap_single(ndev->dev.parent, cur_p->phys, length, DMA_FROM_DEVICE); skb_put(skb, length); @@ -639,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev) ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; - new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN, - GFP_ATOMIC); + new_skb = netdev_alloc_skb_ip_align(ndev, + XTE_MAX_JUMBO_FRAME_SIZE); + if (new_skb == 0) { dev_err(&ndev->dev, "no memory for new sk_buff\n"); spin_unlock_irqrestore(&lp->rx_lock, flags); return; } - skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data)); - cur_p->app0 = STS_CTRL_APP0_IRQONEND; cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, XTE_MAX_JUMBO_FRAME_SIZE, @@ -663,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev) cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; bdstat = cur_p->app0; } - temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p); + lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); spin_unlock_irqrestore(&lp->rx_lock, flags); } @@ -674,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) struct temac_local *lp = netdev_priv(ndev); unsigned int status; - status = temac_dma_in32(lp, TX_IRQ_REG); - temac_dma_out32(lp, TX_IRQ_REG, status); + status = lp->dma_in(lp, TX_IRQ_REG); + lp->dma_out(lp, TX_IRQ_REG, status); if (status & (IRQ_COAL | IRQ_DLY)) temac_start_xmit_done(lp->ndev); @@ -692,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) unsigned int status; /* Read and clear the status registers */ - status = temac_dma_in32(lp, RX_IRQ_REG); - temac_dma_out32(lp, RX_IRQ_REG, status); + status = lp->dma_in(lp, RX_IRQ_REG); + lp->dma_out(lp, RX_IRQ_REG, status); if (status & (IRQ_COAL | IRQ_DLY)) ll_temac_recv(lp->ndev); @@ -794,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev, int i, len = 0; for (i = 0; i < 0x11; i++) - len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i), + len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i), (i % 8) == 7 ? 
"\n" : " "); len += sprintf(buf + len, "\n"); @@ -820,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) struct net_device *ndev; const void *addr; int size, rc = 0; - unsigned int dcrs; /* Init network device structure */ ndev = alloc_etherdev(sizeof(*lp)); @@ -870,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) goto nodev; } - dcrs = dcr_resource_start(np, 0); - if (dcrs == 0) { - dev_err(&op->dev, "could not get DMA register address\n"); - goto nodev; + /* Setup the DMA register accesses, could be DCR or memory mapped */ + if (temac_dcr_setup(lp, op, np)) { + + /* no DCR in the device tree, try non-DCR */ + lp->sdma_regs = of_iomap(np, 0); + if (lp->sdma_regs) { + lp->dma_in = temac_dma_in32; + lp->dma_out = temac_dma_out32; + dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); + } else { + dev_err(&op->dev, "unable to map DMA registers\n"); + goto nodev; + } } - lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); - dev_dbg(&op->dev, "DCR base: %x\n", dcrs); lp->rx_irq = irq_of_parse_and_map(np, 0); lp->tx_irq = irq_of_parse_and_map(np, 1); diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c index da0e462308d..5ae28c975b3 100644 --- a/drivers/net/ll_temac_mdio.c +++ b/drivers/net/ll_temac_mdio.c @@ -10,6 +10,7 @@ #include <linux/phy.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/slab.h> #include <linux/of_mdio.h> #include "ll_temac.h" diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 3e3cc04defd..72379c5439d 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -1256,7 +1256,7 @@ static void set_multicast_list(struct net_device *dev) { dev->name, netdev_mc_count(dev)); if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *cp; cmd = kmalloc(sizeof(struct i596_cmd) + 2 + netdev_mc_count(dev) * 6, GFP_ATOMIC); @@ -1267,8 +1267,8 @@ static void set_multicast_list(struct net_device *dev) { cmd->command = CmdMulticastList; *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; cp = ((char *)(cmd + 1))+2; - netdev_for_each_mc_addr(dmi, dev) { - memcpy(cp, dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(cp, ha->addr, 6); cp += 6; } if (i596_debug & LOG_SRCDST) diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index a8768672dc5..c8e68fde066 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -28,7 +28,6 @@ #include <linux/ioport.h> #include <linux/nubus.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index c292a608f9a..c0876e915ee 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -88,7 +88,6 @@ static char *version = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/nubus.h> #include <linux/errno.h> @@ -98,6 +97,7 @@ static char *version = #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> diff --git a/drivers/net/macb.c b/drivers/net/macb.c index c8a18a6203c..cf7debc865b 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -793,6 +793,7 @@ static void macb_init_hw(struct macb *bp) config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ + config |= 
MACB_BIT(BIG); /* Receive oversized frames */ if (bp->dev->flags & IFF_PROMISC) config |= MACB_BIT(CAF); /* Copy All Frames */ if (!(bp->dev->flags & IFF_BROADCAST)) @@ -882,15 +883,15 @@ static int hash_get_index(__u8 *addr) */ static void macb_sethashtable(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; struct macb *bp = netdev_priv(dev); mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/mace.c b/drivers/net/mace.c index ab5f0bf6d1a..b6855a6476f 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c @@ -16,6 +16,7 @@ #include <linux/crc32.h> #include <linux/spinlock.h> #include <linux/bitrev.h> +#include <linux/slab.h> #include <asm/prom.h> #include <asm/dbdma.h> #include <asm/io.h> @@ -598,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev) mp->maccc |= PROM; } else { unsigned char multicast_filter[8]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 8; i++) @@ -606,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev) } else { for (i = 0; i < 8; i++) multicast_filter[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); i = crc >> 26; /* bit number in multicast_filter */ multicast_filter[i >> 3] |= 1 << (i & 7); } diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 13ba8f4afb7..a6e19fc8a80 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -30,6 +30,7 @@ #include <linux/bitrev.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/macintosh.h> @@ -508,7 +509,7 @@ static void mace_set_multicast(struct net_device *dev) mb->maccc |= PROM; } else { unsigned char multicast_filter[8]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 8; i++) { @@ -517,8 +518,8 @@ static void mace_set_multicast(struct net_device *dev) } else { for (i = 0; i < 8; i++) multicast_filter[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); /* bit number in multicast_filter */ i = crc >> 26; multicast_filter[i >> 3] |= 1 << (i & 7); diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 24109c28810..adb54fe2d82 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -35,11 +35,11 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> +#include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/nubus.h> @@ -50,6 +50,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/bitrev.h> +#include <linux/slab.h> #include <asm/bootinfo.h> #include <asm/system.h> diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 40faa368b07..9a939d828b4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -282,7 +282,7 @@ static int macvlan_open(struct net_device *dev) if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; - err = 
dev_unicast_add(lowerdev, dev->dev_addr); + err = dev_uc_add(lowerdev, dev->dev_addr); if (err < 0) goto out; if (dev->flags & IFF_ALLMULTI) { @@ -294,7 +294,7 @@ static int macvlan_open(struct net_device *dev) return 0; del_unicast: - dev_unicast_delete(lowerdev, dev->dev_addr); + dev_uc_del(lowerdev, dev->dev_addr); out: return err; } @@ -308,7 +308,7 @@ static int macvlan_stop(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); - dev_unicast_delete(lowerdev, dev->dev_addr); + dev_uc_del(lowerdev, dev->dev_addr); macvlan_hash_del(vlan); return 0; @@ -332,11 +332,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) if (macvlan_addr_busy(vlan->port, addr->sa_data)) return -EBUSY; - err = dev_unicast_add(lowerdev, addr->sa_data); + err = dev_uc_add(lowerdev, addr->sa_data); if (err) return err; - dev_unicast_delete(lowerdev, dev->dev_addr); + dev_uc_del(lowerdev, dev->dev_addr); macvlan_hash_change_addr(vlan, addr->sa_data); } @@ -748,6 +748,9 @@ static int macvlan_device_event(struct notifier_block *unused, list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); break; + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid underlaying device to change its type. */ + return NOTIFY_BAD; } return NOTIFY_DONE; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 55ceae09738..abba3cc81f1 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -9,6 +9,7 @@ #include <linux/cache.h> #include <linux/sched.h> #include <linux/types.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/cdev.h> diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c index 65ec77dc31f..23cee7b6af9 100644 --- a/drivers/net/mlx4/cmd.c +++ b/drivers/net/mlx4/cmd.c @@ -33,6 +33,7 @@ */ #include <linux/sched.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/errno.h> diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index ccfe276943f..7cd34e9c7c7 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c @@ -35,6 +35,7 @@ */ #include <linux/hardirq.h> +#include <linux/gfp.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/cq.h> diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index 507e11fce9e..cbabf14f95d 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c @@ -35,6 +35,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index c48b0f4b17b..96180c0ec20 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -35,6 +35,7 @@ #include <linux/tcp.h> #include <linux/if_vlan.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/mlx4/driver.h> #include <linux/mlx4/device.h> @@ -160,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work) static void mlx4_en_clear_list(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct dev_mc_list *plist = priv->mc_list; - struct dev_mc_list *next; - while (plist) { - next = plist->next; - kfree(plist); - plist = next; - } - priv->mc_list = NULL; + kfree(priv->mc_addrs); + priv->mc_addrs_cnt = 0; } static void mlx4_en_cache_mclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct dev_mc_list *mclist; - struct dev_mc_list *tmp; - struct dev_mc_list *plist = NULL; - - for 
(mclist = dev->mc_list; mclist; mclist = mclist->next) { - tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); - if (!tmp) { - en_err(priv, "failed to allocate multicast list\n"); - mlx4_en_clear_list(dev); - return; - } - memcpy(tmp, mclist, sizeof(struct dev_mc_list)); - tmp->next = NULL; - if (plist) - plist->next = tmp; - else - priv->mc_list = tmp; - plist = tmp; + struct netdev_hw_addr *ha; + char *mc_addrs; + int mc_addrs_cnt = netdev_mc_count(dev); + int i; + + mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC); + if (!mc_addrs) { + en_err(priv, "failed to allocate multicast list\n"); + return; } + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); + priv->mc_addrs = mc_addrs; + priv->mc_addrs_cnt = mc_addrs_cnt; } @@ -212,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) mcast_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; - struct dev_mc_list *mclist; u64 mcast_addr = 0; int err; @@ -288,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) if (err) en_err(priv, "Failed disabling multicast filter\n"); } else { + int i; + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0, MLX4_MCAST_DISABLE); if (err) @@ -302,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) netif_tx_lock_bh(dev); mlx4_en_cache_mclist(dev); netif_tx_unlock_bh(dev); - for (mclist = priv->mc_list; mclist; mclist = mclist->next) { - mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr); + for (i = 0; i < priv->mc_addrs_cnt; i++) { + mcast_addr = + mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -511,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); if (err) - en_dbg(HW, priv, "Could not update stats \n"); + en_dbg(HW, priv, "Could not update stats\n"); mutex_lock(&mdev->state_lock); if (mdev->device_up) { @@ -984,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->flags = prof->flags; priv->tx_ring_num = prof->tx_ring_num; priv->rx_ring_num = prof->rx_ring_num; - priv->mc_list = NULL; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c index 16256784a94..0dfb4ec8a9d 100644 --- a/drivers/net/mlx4/en_resources.c +++ b/drivers/net/mlx4/en_resources.c @@ -31,6 +31,7 @@ * */ +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mlx4/qp.h> diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 64394647ddd..8e2fcb7103c 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -32,6 +32,7 @@ */ #include <linux/mlx4/cq.h> +#include <linux/slab.h> #include <linux/mlx4/qp.h> #include <linux/skbuff.h> #include <linux/if_ether.h> diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 3d1396af946..580968f304e 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -33,6 +33,7 @@ #include <asm/page.h> #include <linux/mlx4/cq.h> +#include <linux/slab.h> #include <linux/mlx4/qp.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index bffb7995cb7..7365bf488b8 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -32,6 +32,7 @@ */ #include <linux/interrupt.h> +#include <linux/slab.h> #include <linux/mm.h> #include 
<linux/dma-mapping.h> diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 04b382fcb8c..57288ca1395 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c @@ -34,6 +34,7 @@ #include <linux/errno.h> #include <linux/mm.h> #include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/mlx4/cmd.h> diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c index 0e7eb1038f9..55506780275 100644 --- a/drivers/net/mlx4/intf.c +++ b/drivers/net/mlx4/intf.c @@ -31,6 +31,8 @@ * SOFTWARE. */ +#include <linux/slab.h> + #include "mlx4.h" struct mlx4_device_context { diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 8f6e816a739..e3e0d54a7c8 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -38,6 +38,7 @@ #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -1023,6 +1024,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_attr.show = show_port_type; info->port_attr.store = set_port_type; + sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_attr); if (err) { diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 5ccbce9866f..c4f88b7ef7b 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c @@ -32,7 +32,6 @@ */ #include <linux/string.h> -#include <linux/slab.h> #include <linux/mlx4/cmd.h> diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index 82c3ebc584e..b55e46c8b68 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h @@ -492,7 +492,8 @@ struct mlx4_en_priv { struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_port_stats port_stats; - struct dev_mc_list *mc_list; + char *mc_addrs; + int mc_addrs_cnt; struct mlx4_en_stat_out_mbox hw_stats; }; diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index ca7ab8e7b4c..3dc69be4949 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -33,6 +33,7 @@ */ #include <linux/errno.h> +#include <linux/slab.h> #include <linux/mlx4/cmd.h> diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index ca25b9dc837..5caf0115fa5 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c @@ -32,6 +32,8 @@ * SOFTWARE. */ +#include <linux/slab.h> + #include "mlx4.h" #include "fw.h" diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index 42ab9fc01d3..ec9350e5f21 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c @@ -33,6 +33,7 @@ * SOFTWARE. 
*/ +#include <linux/gfp.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index 1377d0dc8f1..3b07b80a045 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c @@ -32,6 +32,7 @@ */ #include <linux/mlx4/cmd.h> +#include <linux/gfp.h> #include "mlx4.h" #include "icm.h" diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index c97b6e4365a..4ee9d04f659 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -54,6 +54,7 @@ #include <linux/io.h> #include <linux/types.h> #include <linux/inet_lro.h> +#include <linux/slab.h> #include <asm/system.h> static char mv643xx_eth_driver_name[] = "mv643xx_eth"; @@ -1769,7 +1770,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev) struct mv643xx_eth_private *mp = netdev_priv(dev); u32 *mc_spec; u32 *mc_other; - struct dev_addr_list *addr; + struct netdev_hw_addr *ha; int i; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { @@ -1794,8 +1795,8 @@ oom: memset(mc_spec, 0, 0x100); memset(mc_other, 0, 0x100); - netdev_for_each_mc_addr(addr, dev) { - u8 *a = addr->da_addr; + netdev_for_each_mc_addr(ha, dev) { + u8 *a = ha->addr; u32 *table; int entry; diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c index 93c709d63e2..3a7ad840d5b 100644 --- a/drivers/net/mvme147.c +++ b/drivers/net/mvme147.c @@ -10,11 +10,11 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/errno.h> +#include <linux/gfp.h> /* Used for the temporal inet entries and routing */ #include <linux/socket.h> #include <linux/route.h> diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 676c513e12f..e0b47cc8a86 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -64,6 +64,7 @@ #include <linux/moduleparam.h> #include <linux/io.h> #include <linux/log2.h> +#include <linux/slab.h> #include <net/checksum.h> #include <net/ip.h> #include <net/tcp.h> @@ -109,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL"); struct myri10ge_rx_buffer_state { struct page *page; int page_offset; - DECLARE_PCI_UNMAP_ADDR(bus) - DECLARE_PCI_UNMAP_LEN(len) + DEFINE_DMA_UNMAP_ADDR(bus); + DEFINE_DMA_UNMAP_LEN(len); }; struct myri10ge_tx_buffer_state { struct sk_buff *skb; int last; - DECLARE_PCI_UNMAP_ADDR(bus) - DECLARE_PCI_UNMAP_LEN(len) + DEFINE_DMA_UNMAP_ADDR(bus); + DEFINE_DMA_UNMAP_LEN(len); }; struct myri10ge_cmd { @@ -1233,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, rx->info[idx].page_offset = rx->page_offset; /* note that this is the address of the start of the * page */ - pci_unmap_addr_set(&rx->info[idx], bus, rx->bus); + dma_unmap_addr_set(&rx->info[idx], bus, rx->bus); rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset); rx->shadow[idx].addr_high = @@ -1265,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, /* unmap the recvd page if we're the only or last user of it */ if (bytes >= MYRI10GE_ALLOC_SIZE / 2 || (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) { - pci_unmap_page(pdev, (pci_unmap_addr(info, bus) + pci_unmap_page(pdev, (dma_unmap_addr(info, bus) & ~(MYRI10GE_ALLOC_SIZE - 1)), MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE); } @@ -1372,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) tx->info[idx].last = 0; } tx->done++; - len = 
pci_unmap_len(&tx->info[idx], len); - pci_unmap_len_set(&tx->info[idx], len, 0); + len = dma_unmap_len(&tx->info[idx], len); + dma_unmap_len_set(&tx->info[idx], len, 0); if (skb) { ss->stats.tx_bytes += skb->len; ss->stats.tx_packets++; dev_kfree_skb_irq(skb); if (len) pci_unmap_single(pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); } else { if (len) pci_unmap_page(pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); } @@ -1689,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev, if (pause->tx_pause != mgp->pause) return myri10ge_change_pause(mgp, pause->tx_pause); if (pause->rx_pause != mgp->pause) - return myri10ge_change_pause(mgp, pause->tx_pause); + return myri10ge_change_pause(mgp, pause->rx_pause); if (pause->autoneg != 0) return -EINVAL; return 0; @@ -2093,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss) /* Mark as free */ tx->info[idx].skb = NULL; tx->done++; - len = pci_unmap_len(&tx->info[idx], len); - pci_unmap_len_set(&tx->info[idx], len, 0); + len = dma_unmap_len(&tx->info[idx], len); + dma_unmap_len_set(&tx->info[idx], len, 0); if (skb) { ss->stats.tx_dropped++; dev_kfree_skb_any(skb); if (len) pci_unmap_single(mgp->pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); } else { if (len) pci_unmap_page(mgp->pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); } @@ -2756,12 +2757,12 @@ again: } /* map the skb for DMA */ - len = skb->len - skb->data_len; + len = skb_headlen(skb); idx = tx->req & tx->mask; tx->info[idx].skb = skb; bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); - pci_unmap_addr_set(&tx->info[idx], bus, bus); - pci_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); frag_cnt = skb_shinfo(skb)->nr_frags; frag_idx = 0; @@ -2864,8 +2865,8 @@ again: len = frag->size; bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset, len, PCI_DMA_TODEVICE); - pci_unmap_addr_set(&tx->info[idx], bus, bus); - pci_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); } (req - rdma_count)->rdma_count = rdma_count; @@ -2902,19 +2903,19 @@ abort_linearize: idx = tx->req & tx->mask; tx->info[idx].skb = NULL; do { - len = pci_unmap_len(&tx->info[idx], len); + len = dma_unmap_len(&tx->info[idx], len); if (len) { if (tx->info[idx].skb != NULL) pci_unmap_single(mgp->pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); else pci_unmap_page(mgp->pdev, - pci_unmap_addr(&tx->info[idx], + dma_unmap_addr(&tx->info[idx], bus), len, PCI_DMA_TODEVICE); - pci_unmap_len_set(&tx->info[idx], len, 0); + dma_unmap_len_set(&tx->info[idx], len, 0); tx->info[idx].skb = NULL; } idx = (idx + 1) & tx->mask; @@ -3001,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) { struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; __be32 data[2] = { 0, 0 }; int err; @@ -3038,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev) } /* Walk the multicast list, and add each address */ - netdev_for_each_mc_addr(mc_list, dev) { - memcpy(data, &mc_list->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(data, &ha->addr, 6); 
cmd.data0 = ntohl(data[0]); cmd.data1 = ntohl(data[1]); err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, @@ -3047,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) if (err != 0) { netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", - err, mc_list->dmi_addr); + err, ha->addr); goto abort; } } @@ -3687,7 +3688,6 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp) if (status != 0) { dev_err(&mgp->pdev->dev, "failed reset\n"); goto abort_with_fw; - return; } mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot); diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 8b431308535..b72e749afdf 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -14,7 +14,6 @@ static char version[] = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> @@ -26,6 +25,7 @@ static char version[] = #include <linux/of.h> #include <linux/of_device.h> #include <linux/firmware.h> +#include <linux/gfp.h> #include <net/dst.h> #include <net/arp.h> diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index e5203878324..9250bf6573e 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -2493,12 +2493,12 @@ static void __set_rx_mode(struct net_device *dev) rx_mode = RxFilterEnable | AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff; mc_filter[b/8] |= (1 << (b & 0x07)); } rx_mode = RxFilterEnable | AcceptBroadcast diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 992dbfffdb0..f4347f88b6f 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -142,7 +142,7 @@ bad_clone_list[] __initdata = { {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ #ifdef CONFIG_MACH_TX49XX - {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ + {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ #endif {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ {NULL,} diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index a53bb201d3c..ff3c4c81498 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c @@ -66,7 +66,6 @@ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.o #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index bf4af5248cb..a361dea3557 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -37,6 +37,7 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/console.h> #include <linux/moduleparam.h> #include <linux/string.h> diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 144d2e88042..174ac8ef82f 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define 
_NETXEN_NIC_LINUX_SUBVERSION 72 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.72" +#define _NETXEN_NIC_LINUX_SUBVERSION 73 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.73" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) @@ -420,7 +420,6 @@ struct status_desc { } __attribute__ ((aligned(16))); /* UNIFIED ROMIMAGE *************************/ -#define NX_UNI_FW_MIN_SIZE 0xc8000 #define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 #define NX_UNI_DIR_SECT_BOOTLD 0x6 #define NX_UNI_DIR_SECT_FW 0x7 diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 2a8ef5fc966..f26e54716c8 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) } sds_ring->desc_head = (struct status_desc *)addr; - sds_ring->crb_sts_consumer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_sts_consumer[ring]); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + sds_ring->crb_sts_consumer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_sts_consumer[ring]); - sds_ring->crb_intr_mask = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].sw_int_mask[ring]); + sds_ring->crb_intr_mask = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].sw_int_mask[ring]); + } } diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index f8499e56cbe..aecba787f7c 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -703,6 +703,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev, } } +static u32 netxen_nic_get_tx_csum(struct net_device *dev) +{ + return dev->features & NETIF_F_IP_CSUM; +} + static u32 netxen_nic_get_rx_csum(struct net_device *dev) { struct netxen_adapter *adapter = netdev_priv(dev); @@ -909,6 +914,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = { .set_ringparam = netxen_nic_set_ringparam, .get_pauseparam = netxen_nic_get_pauseparam, .set_pauseparam = netxen_nic_set_pauseparam, + .get_tx_csum = netxen_nic_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .set_sg = ethtool_op_set_sg, .get_tso = netxen_nic_get_tso, diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index a945591298a..5e5fe2fd639 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -23,6 +23,7 @@ * */ +#include <linux/slab.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" @@ -537,7 +538,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 null_addr[6]; int i; @@ -571,8 +572,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev) netxen_nic_enable_mcast_filter(adapter); i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) + netxen_nic_set_mcast_addr(adapter, i++, ha->addr); /* Clear out remaining addresses */ while (i < adapter->max_mc_count) @@ -680,7 +681,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, void netxen_p3_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = 
VPORT_MISS_MODE_DROP; LIST_HEAD(del_list); @@ -707,8 +708,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) } if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(mc_ptr, netdev) - nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); + netdev_for_each_mc_addr(ha, netdev) + nx_p3_nic_add_mac(adapter, ha->addr, &del_list); } send_fw_cmd: diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 1c63610ead4..388feaf60ee 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -25,6 +25,7 @@ #include <linux/netdevice.h> #include <linux/delay.h> +#include <linux/slab.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" @@ -613,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) return NULL; } +#define QLCNIC_FILEHEADER_SIZE (14 * 4) + static int -nx_set_product_offs(struct netxen_adapter *adapter) -{ - struct uni_table_desc *ptab_descr; +netxen_nic_validate_header(struct netxen_adapter *adapter) + { const u8 *unirom = adapter->fw->data; - uint32_t i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + u32 fw_file_size = adapter->fw->size; + u32 tab_size; __le32 entries; + __le32 entry_size; + + if (fw_file_size < QLCNIC_FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_bootld(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_BOOTLD_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + return 0; +} + +static int +netxen_nic_validate_fw(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_FIRMWARE_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + + +static int +netxen_nic_validate_product_offs(struct netxen_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
1 : netxen_p3_has_mn(adapter); + __le32 entries; + __le32 entry_size; + u32 tab_size; + u32 i; ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); if (ptab_descr == NULL) - return -1; + return -EINVAL; entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; nomn: for (i = 0; i < entries; i++) { @@ -657,9 +759,38 @@ nomn: goto nomn; } - return -1; + return -EINVAL; } +static int +netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) +{ + if (netxen_nic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, u32 section, u32 idx_offset) @@ -761,7 +892,7 @@ nx_get_bios_version(struct netxen_adapter *adapter) if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + NX_UNI_BIOS_VERSION_OFF)); - return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } else return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); @@ -889,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter) flashaddr += 8; } + + size = (__force u32)nx_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + } + } else { u64 data; u32 hi, lo; @@ -933,27 +1074,23 @@ static int netxen_validate_firmware(struct netxen_adapter *adapter) { __le32 val; - u32 ver, min_ver, bios, min_size; + u32 ver, min_ver, bios; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->fw_type; if (fw_type == NX_UNIFIED_ROMIMAGE) { - if (nx_set_product_offs(adapter)) + if (netxen_nic_validate_unified_romimage(adapter)) return -EINVAL; - - min_size = NX_UNI_FW_MIN_SIZE; } else { val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); if ((__force u32)val != NETXEN_BDINFO_MAGIC) return -EINVAL; - min_size = NX_FW_MIN_SIZE; + if (fw->size < NX_FW_MIN_SIZE) + return -EINVAL; } - if (fw->size < min_size) - return -EINVAL; - val = nx_get_fw_version(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 08780ef1c1f..b665b420a4f 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -23,6 +23,7 @@ * */ +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include "netxen_nic_hw.h" @@ -604,16 +605,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter) static int netxen_setup_pci_map(struct netxen_adapter *adapter) { - void __iomem *mem_ptr0 = NULL; - void __iomem *mem_ptr1 = NULL; - void __iomem *mem_ptr2 = NULL; void __iomem *db_ptr = NULL; resource_size_t mem_base, db_base; - unsigned long mem_len, db_len = 0, 
pci_len0 = 0; + unsigned long mem_len, db_len = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; + struct netxen_hardware_context *ahw = &adapter->ahw; int err = 0; @@ -630,24 +629,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) /* 128 Meg of memory */ if (mem_len == NETXEN_PCI_128MB_SIZE) { - mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); - mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, + + ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); + ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE); - mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); - pci_len0 = FIRST_PAGE_GROUP_SIZE; + if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || + ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; + } else if (mem_len == NETXEN_PCI_32MB_SIZE) { - mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); - mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - + + ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + } else if (mem_len == NETXEN_PCI_2MB_SIZE) { - mem_ptr0 = pci_ioremap_bar(pdev, 0); - if (mem_ptr0 == NULL) { + ahw->pci_base0 = pci_ioremap_bar(pdev, 0); + if (ahw->pci_base0 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); return -EIO; } - pci_len0 = mem_len; + ahw->pci_len0 = mem_len; } else { return -EIO; } @@ -656,11 +671,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); - adapter->ahw.pci_base0 = mem_ptr0; - adapter->ahw.pci_len0 = pci_len0; - adapter->ahw.pci_base1 = mem_ptr1; - adapter->ahw.pci_base2 = mem_ptr2; - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); @@ -772,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter) if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; - } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) { - switch (adapter->ahw.board_type) { - case NETXEN_BRDTYPE_P2_SB31_10G: - case NETXEN_BRDTYPE_P2_SB31_10G_CX4: - adapter->msix_supported = !!use_msi_x; - adapter->rss_supported = !!use_msi_x; - break; - default: - break; + } else { + u32 flashed_ver = 0; + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + adapter->msix_supported = !!use_msi_x; + adapter->rss_supported = !!use_msi_x; + break; + default: + break; + } } } @@ -1246,8 +1263,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int pci_func_id = PCI_FUNC(pdev->devfn); uint8_t revision_id; - if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x" + if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { + pr_warning("%s: chip 
revisions between 0x%x-0x%x " "will not be enabled.\n", module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; @@ -2294,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work) } break; + case NX_DEV_NEED_RESET: case NX_DEV_INITALIZING: if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { netxen_schedule_work(adapter, @@ -2337,6 +2355,9 @@ netxen_detach_work(struct work_struct *work) ref_cnt = nx_decr_dev_ref_cnt(adapter); + if (ref_cnt == -EIO) + goto err_ret; + delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); adapter->fw_wait_cnt = 0; diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index c16cbfb4061..3892330f244 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c @@ -51,7 +51,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 05c29c2cef2..b7837ebd9a7 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -109,7 +109,6 @@ static int fifo = 0x8; /* don't change */ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> @@ -596,7 +595,7 @@ static int init586(struct net_device *dev) struct iasetup_cmd_struct __iomem *ias_cmd; struct tdr_cmd_struct __iomem *tdr_cmd; struct mcsetup_cmd_struct __iomem *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs = netdev_mc_count(dev); ptr = p->scb + 1; @@ -725,8 +724,8 @@ static int init586(struct net_device *dev) writew(num_addrs * 6, &mc_cmd->mc_cnt); i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6); writew(make16(mc_cmd), &p->scb->cbl_offset); writeb(CUC_START, &p->scb->cmd_cuc); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 0678f3106cb..ef940226ee2 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -25,6 +25,7 @@ #include <linux/jiffies.h> #include <linux/crc32.h> #include <linux/list.h> +#include <linux/slab.h> #include <linux/io.h> @@ -6313,7 +6314,6 @@ static void niu_set_rx_mode(struct net_device *dev) { struct niu *np = netdev_priv(dev); int i, alt_cnt, err; - struct dev_addr_list *addr; struct netdev_hw_addr *ha; unsigned long flags; u16 hash[16] = { 0, }; @@ -6365,8 +6365,8 @@ static void niu_set_rx_mode(struct net_device *dev) for (i = 0; i < 16; i++) hash[i] = 0xffff; } else if (!netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(addr, dev) { - u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 8dd509c09bc..e88e97cd1b1 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -116,6 +116,7 @@ #include <linux/if_vlan.h> #include <linux/rtnetlink.h> #include <linux/jiffies.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c index be368e5cbf7..6b1d443f2ce 100644 --- a/drivers/net/octeon/octeon_mgmt.c +++ b/drivers/net/octeon/octeon_mgmt.c @@ -13,6 +13,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/slab.h> #include <linux/phy.h> #include <linux/spinlock.h> @@ -316,7 
+317,6 @@ good: skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; netdev->stats.rx_bytes += skb->len; - netdev->last_rx = jiffies; netif_receive_skb(skb); rc = 0; } else if (re.s.code == RING_ENTRY_CODE_MORE) { @@ -474,7 +474,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ struct octeon_mgmt_cam_state cam_state; - struct dev_addr_list *list; + struct netdev_hw_addr *ha; struct list_head *pos; int available_cam_entries; @@ -510,8 +510,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) } } if (multicast_mode == 0) { - netdev_for_each_mc_addr(list, netdev) - octeon_mgmt_cam_state_add(&cam_state, list->da_addr); + netdev_for_each_mc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index d44d4a208bb..370c147d08a 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 36785853a14..dc3b4c7914f 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -1813,12 +1813,12 @@ static void netdrv_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 091e0b00043..580977f56ad 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c @@ -1,20 +1,20 @@ /*====================================================================== A PCMCIA ethernet driver for the 3com 3c589 card. - + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net 3c589_cs.c 1.162 2001/10/13 00:08:50 The network driver code is based on Donald Becker's 3c589 code: - + Written 1994 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Donald Becker may be reached at becker@scyld.com - + Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> ======================================================================*/ @@ -69,31 +69,54 @@ /* The top five bits written to EL3_CMD are a command, the lower 11 bits are the parameter, if applicable. 
*/ enum c509cmd { - TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, - RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, - TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, - FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, - SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, - SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, - StatsDisable = 22<<11, StopCoax = 23<<11, + TotalReset = 0<<11, + SelectWindow = 1<<11, + StartCoax = 2<<11, + RxDisable = 3<<11, + RxEnable = 4<<11, + RxReset = 5<<11, + RxDiscard = 8<<11, + TxEnable = 9<<11, + TxDisable = 10<<11, + TxReset = 11<<11, + FakeIntr = 12<<11, + AckIntr = 13<<11, + SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, + SetRxFilter = 16<<11, + SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, + SetTxStart = 19<<11, + StatsEnable = 21<<11, + StatsDisable = 22<<11, + StopCoax = 23<<11 }; enum c509status { - IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, - TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, - IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 + IntLatch = 0x0001, + AdapterFailure = 0x0002, + TxComplete = 0x0004, + TxAvailable = 0x0008, + RxComplete = 0x0010, + RxEarly = 0x0020, + IntReq = 0x0040, + StatsFull = 0x0080, + CmdBusy = 0x1000 }; /* The SetRxFilter command accepts the following classes: */ enum RxFilter { - RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 + RxStation = 1, + RxMulticast = 2, + RxBroadcast = 4, + RxProm = 8 }; /* Register window 1 offsets, the window used in normal operation. */ #define TX_FIFO 0x00 #define RX_FIFO 0x00 -#define RX_STATUS 0x08 -#define TX_STATUS 0x0B +#define RX_STATUS 0x08 +#define TX_STATUS 0x0B #define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ #define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ @@ -106,13 +129,13 @@ enum RxFilter { struct el3_private { struct pcmcia_device *p_dev; - dev_node_t node; - /* For transceiver monitoring */ - struct timer_list media; - u16 media_status; - u16 fast_poll; - unsigned long last_irq; - spinlock_t lock; + dev_node_t node; + /* For transceiver monitoring */ + struct timer_list media; + u16 media_status; + u16 fast_poll; + unsigned long last_irq; + spinlock_t lock; }; static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; @@ -164,15 +187,15 @@ static void tc589_detach(struct pcmcia_device *p_dev); ======================================================================*/ static const struct net_device_ops el3_netdev_ops = { - .ndo_open = el3_open, - .ndo_stop = el3_close, + .ndo_open = el3_open, + .ndo_stop = el3_close, .ndo_start_xmit = el3_start_xmit, - .ndo_tx_timeout = el3_tx_timeout, + .ndo_tx_timeout = el3_tx_timeout, .ndo_set_config = el3_config, .ndo_get_stats = el3_get_stats, .ndo_set_multicast_list = set_multicast_list, .ndo_change_mtu = eth_change_mtu, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -236,7 +259,7 @@ static void tc589_detach(struct pcmcia_device *link) tc589_config() is scheduled to run after a CARD_INSERTION event is received, to configure the PCMCIA socket, and to make the ethernet device available to the system. 
- + ======================================================================*/ static int tc589_config(struct pcmcia_device *link) @@ -249,7 +272,7 @@ static int tc589_config(struct pcmcia_device *link) char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; u8 *buf; size_t len; - + dev_dbg(&link->dev, "3c589_config\n"); phys_addr = (__be16 *)dev->dev_addr; @@ -278,7 +301,7 @@ static int tc589_config(struct pcmcia_device *link) ret = pcmcia_request_configuration(link, &link->conf); if (ret) goto failed; - + dev->irq = link->irq.AssignedIRQ; dev->base_addr = link->io.BasePort1; ioaddr = dev->base_addr; @@ -312,7 +335,7 @@ static int tc589_config(struct pcmcia_device *link) dev->if_port = if_port; else printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); - + link->dev_node = &lp->node; SET_NETDEV_DEV(dev, &link->dev); @@ -324,13 +347,12 @@ static int tc589_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " - "hw_addr %pM\n", - dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, - dev->dev_addr); - printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", - (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], - if_names[dev->if_port]); + netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", + (multi ? "562" : "589"), dev->base_addr, dev->irq, + dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", + (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], + if_names[dev->if_port]); return 0; failed: @@ -343,7 +365,7 @@ failed: After a card is removed, tc589_release() will unregister the net device, and release the PCMCIA configuration. If the device is still open, this will be postponed until it is closed. - + ======================================================================*/ static void tc589_release(struct pcmcia_device *link) @@ -365,7 +387,7 @@ static int tc589_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; - if (link->open) { + if (link->open) { tc589_reset(dev); netif_device_attach(dev); } @@ -385,8 +407,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd) while (--i > 0) if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; if (i == 0) - printk(KERN_WARNING "%s: command 0x%04x did not complete!\n", - dev->name, cmd); + netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); } /* @@ -412,7 +433,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port) { struct el3_private *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; - + EL3WINDOW(0); switch (if_port) { case 0: case 1: outw(0, ioaddr + 6); break; @@ -435,14 +456,13 @@ static void dump_status(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; EL3WINDOW(1); - printk(KERN_INFO " irq status %04x, rx status %04x, tx status " - "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS), - inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS), - inw(ioaddr+TX_FREE)); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), + inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); EL3WINDOW(4); - printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x" - " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06), - inw(ioaddr+0x08), inw(ioaddr+0x0a)); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), + inw(ioaddr+0x0a)); EL3WINDOW(1); } @@ -451,18 +471,18 @@ static void tc589_reset(struct net_device *dev) { 
unsigned int ioaddr = dev->base_addr; int i; - + EL3WINDOW(0); - outw(0x0001, ioaddr + 4); /* Activate board. */ + outw(0x0001, ioaddr + 4); /* Activate board. */ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ - + /* Set the station address in window 2. */ EL3WINDOW(2); for (i = 0; i < 6; i++) outb(dev->dev_addr[i], ioaddr + i); tc589_set_xcvr(dev, dev->if_port); - + /* Switch to the stats window, and clear all stats by reading. */ outw(StatsDisable, ioaddr + EL3_CMD); EL3WINDOW(6); @@ -470,7 +490,7 @@ static void tc589_reset(struct net_device *dev) inb(ioaddr+i); inw(ioaddr + 10); inw(ioaddr + 12); - + /* Switch to register set 1 for normal use. */ EL3WINDOW(1); @@ -504,8 +524,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map) if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { if (map->port <= 3) { dev->if_port = map->port; - printk(KERN_INFO "%s: switched to %s port\n", - dev->name, if_names[dev->if_port]); + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); tc589_set_xcvr(dev, dev->if_port); } else return -EINVAL; @@ -517,13 +536,13 @@ static int el3_open(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; - + if (!pcmcia_dev_present(link)) return -ENODEV; link->open++; netif_start_queue(dev); - + tc589_reset(dev); init_timer(&lp->media); lp->media.function = &media_check; @@ -533,15 +552,15 @@ static int el3_open(struct net_device *dev) dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", dev->name, inw(dev->base_addr + EL3_STATUS)); - + return 0; } static void el3_tx_timeout(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - - printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); + + netdev_warn(dev, "Transmit timed out!\n"); dump_status(dev); dev->stats.tx_errors++; dev->trans_start = jiffies; @@ -555,19 +574,18 @@ static void pop_tx_status(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; int i; - + /* Clear the Tx status stack. */ for (i = 32; i > 0; i--) { u_char tx_status = inb(ioaddr + TX_STATUS); if (!(tx_status & 0x84)) break; /* reset transmitter on jabber error or underrun */ if (tx_status & 0x30) - tc589_wait_for_completion(dev, TxReset); + tc589_wait_for_completion(dev, TxReset); if (tx_status & 0x38) { - pr_debug("%s: transmit error: status 0x%02x\n", - dev->name, tx_status); - outw(TxEnable, ioaddr + EL3_CMD); - dev->stats.tx_aborted_errors++; + netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; } outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. 
*/ } @@ -580,11 +598,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct el3_private *priv = netdev_priv(dev); unsigned long flags; - pr_debug("%s: el3_start_xmit(length = %ld) called, " - "status %4.4x.\n", dev->name, (long)skb->len, - inw(ioaddr + EL3_STATUS)); + netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", + (long)skb->len, inw(ioaddr + EL3_STATUS)); - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->lock, flags); dev->stats.tx_bytes += skb->len; @@ -602,9 +619,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb, } pop_tx_status(dev); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); dev_kfree_skb(skb); - + return NETDEV_TX_OK; } @@ -616,37 +633,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) unsigned int ioaddr; __u16 status; int i = 0, handled = 1; - + if (!netif_device_present(dev)) return IRQ_NONE; ioaddr = dev->base_addr; - pr_debug("%s: interrupt, status %4.4x.\n", - dev->name, inw(ioaddr + EL3_STATUS)); + netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); - spin_lock(&lp->lock); + spin_lock(&lp->lock); while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete | StatsFull)) { if ((status & 0xe000) != 0x2000) { - pr_debug("%s: interrupt from dead card\n", dev->name); - handled = 0; - break; + netdev_dbg(dev, "interrupt from dead card\n"); + handled = 0; + break; } - if (status & RxComplete) - el3_rx(dev); - + el3_rx(dev); if (status & TxAvailable) { - pr_debug(" TX room bit was handled.\n"); - /* There's room in the FIFO for a full-sized packet. */ - outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); - netif_wake_queue(dev); + netdev_dbg(dev, " TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); } - if (status & TxComplete) - pop_tx_status(dev); - + pop_tx_status(dev); if (status & (AdapterFailure | RxEarly | StatsFull)) { /* Handle all uncommon interrupts. */ if (status & StatsFull) /* Empty statistics. */ @@ -660,8 +672,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) EL3WINDOW(4); fifo_diag = inw(ioaddr + 4); EL3WINDOW(1); - printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic" - " register %04x.\n", dev->name, fifo_diag); + netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", + fifo_diag); if (fifo_diag & 0x0400) { /* Tx overrun */ tc589_wait_for_completion(dev, TxReset); @@ -676,22 +688,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); } } - if (++i > 10) { - printk(KERN_ERR "%s: infinite loop in interrupt, " - "status %4.4x.\n", dev->name, status); - /* Clear all interrupts */ - outw(AckIntr | 0xFF, ioaddr + EL3_CMD); - break; + netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", + status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; } /* Acknowledge the IRQ. 
*/ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); } - lp->last_irq = jiffies; - spin_unlock(&lp->lock); - pr_debug("%s: exiting interrupt, status %4.4x.\n", - dev->name, inw(ioaddr + EL3_STATUS)); + spin_unlock(&lp->lock); + netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", + inw(ioaddr + EL3_STATUS)); return IRQ_RETVAL(handled); } @@ -710,7 +720,7 @@ static void media_check(unsigned long arg) if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + EL3_TIMER) == 0xff)) { if (!lp->fast_poll) - printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name); + netdev_warn(dev, "interrupt(s) dropped!\n"); local_irq_save(flags); el3_interrupt(dev->irq, dev); @@ -727,7 +737,7 @@ static void media_check(unsigned long arg) /* lp->lock guards the EL3 window. Window should always be 1 except when the lock is held */ - spin_lock_irqsave(&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); EL3WINDOW(4); media = inw(ioaddr+WN4_MEDIA) & 0xc810; @@ -747,32 +757,30 @@ static void media_check(unsigned long arg) if (media != lp->media_status) { if ((media & lp->media_status & 0x8000) && ((lp->media_status ^ media) & 0x0800)) - printk(KERN_INFO "%s: %s link beat\n", dev->name, - (lp->media_status & 0x0800 ? "lost" : "found")); + netdev_info(dev, "%s link beat\n", + (lp->media_status & 0x0800 ? "lost" : "found")); else if ((media & lp->media_status & 0x4000) && ((lp->media_status ^ media) & 0x0010)) - printk(KERN_INFO "%s: coax cable %s\n", dev->name, - (lp->media_status & 0x0010 ? "ok" : "problem")); + netdev_info(dev, "coax cable %s\n", + (lp->media_status & 0x0010 ? "ok" : "problem")); if (dev->if_port == 0) { if (media & 0x8000) { if (media & 0x0800) - printk(KERN_INFO "%s: flipped to 10baseT\n", - dev->name); + netdev_info(dev, "flipped to 10baseT\n"); else - tc589_set_xcvr(dev, 2); + tc589_set_xcvr(dev, 2); } else if (media & 0x4000) { if (media & 0x0010) tc589_set_xcvr(dev, 1); else - printk(KERN_INFO "%s: flipped to 10base2\n", - dev->name); + netdev_info(dev, "flipped to 10base2\n"); } } lp->media_status = media; } - + EL3WINDOW(1); - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); reschedule: lp->media.expires = jiffies + HZ; @@ -786,7 +794,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) struct pcmcia_device *link = lp->p_dev; if (pcmcia_dev_present(link)) { - spin_lock_irqsave(&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); update_stats(dev); spin_unlock_irqrestore(&lp->lock, flags); } @@ -798,21 +806,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) single-threaded if the device is active. This is expected to be a rare operation, and it's simpler for the rest of the driver to assume that window 1 is always valid rather than use a special window-state variable. - + Caller must hold the lock for this */ static void update_stats(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - pr_debug("%s: updating the statistics.\n", dev->name); + netdev_dbg(dev, "updating the statistics.\n"); /* Turn off statistics updates while reading. */ outw(StatsDisable, ioaddr + EL3_CMD); /* Switch to the stats window, and read everything. */ EL3WINDOW(6); - dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); - /* Multiple collisions. */ inb(ioaddr + 2); + /* Multiple collisions. 
*/ inb(ioaddr + 2); dev->stats.collisions += inb(ioaddr + 3); dev->stats.tx_window_errors += inb(ioaddr + 4); dev->stats.rx_fifo_errors += inb(ioaddr + 5); @@ -821,7 +829,7 @@ static void update_stats(struct net_device *dev) /* Tx deferrals */ inb(ioaddr + 8); /* Rx octets */ inw(ioaddr + 10); /* Tx octets */ inw(ioaddr + 12); - + /* Back to window 1, and turn statistics back on. */ EL3WINDOW(1); outw(StatsEnable, ioaddr + EL3_CMD); @@ -832,9 +840,9 @@ static int el3_rx(struct net_device *dev) unsigned int ioaddr = dev->base_addr; int worklimit = 32; short rx_status; - - pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", - dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); + + netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && worklimit > 0) { worklimit--; @@ -852,11 +860,11 @@ static int el3_rx(struct net_device *dev) } else { short pkt_len = rx_status & 0x7ff; struct sk_buff *skb; - + skb = dev_alloc_skb(pkt_len+5); - - pr_debug(" Receiving packet size %d status %4.4x.\n", - pkt_len, rx_status); + + netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); if (skb != NULL) { skb_reserve(skb, 2); insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), @@ -866,8 +874,8 @@ static int el3_rx(struct net_device *dev) dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } else { - pr_debug("%s: couldn't allocate a sk_buff of" - " size %d.\n", dev->name, pkt_len); + netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", + pkt_len); dev->stats.rx_dropped++; } } @@ -875,7 +883,7 @@ static int el3_rx(struct net_device *dev) tc589_wait_for_completion(dev, RxDiscard); } if (worklimit == 0) - printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name); + netdev_warn(dev, "too much work in el3_rx!\n"); return 0; } @@ -906,17 +914,17 @@ static int el3_close(struct net_device *dev) struct el3_private *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; - + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); if (pcmcia_dev_present(link)) { /* Turn off statistics ASAP. We update dev->stats below. */ outw(StatsDisable, ioaddr + EL3_CMD); - + /* Disable the receiver and transmitter. */ outw(RxDisable, ioaddr + EL3_CMD); outw(TxDisable, ioaddr + EL3_CMD); - + if (dev->if_port == 2) /* Turn off thinnet power. Green! */ outw(StopCoax, ioaddr + EL3_CMD); @@ -925,12 +933,12 @@ static int el3_close(struct net_device *dev) EL3WINDOW(4); outw(0, ioaddr + WN4_MEDIA); } - + /* Switching back to window 0 disables the IRQ. */ EL3WINDOW(0); /* But we explicitly zero the IRQ line select anyway. 
*/ outw(0x0f00, ioaddr + WN0_IRQ); - + /* Check if the card still exists */ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) update_stats(dev); @@ -939,7 +947,7 @@ static int el3_close(struct net_device *dev) link->open--; netif_stop_queue(dev); del_timer_sync(&lp->media); - + return 0; } @@ -961,7 +969,7 @@ static struct pcmcia_driver tc589_driver = { }, .probe = tc589_probe, .remove = tc589_detach, - .id_table = tc589_ids, + .id_table = tc589_ids, .suspend = tc589_suspend, .resume = tc589_resume, }; diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 09291e60d30..70fc9591821 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -28,7 +28,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/ptrace.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> @@ -1623,11 +1622,11 @@ static struct net_device_stats *get_stats(struct net_device *dev) static inline void make_mc_bits(u8 *bits, struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 crc; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc(ETH_ALEN, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index b9dc80b9d04..6734f7d6da9 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -1196,11 +1196,11 @@ static void set_rx_mode(struct net_device *dev) memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << (bit & 7)); } outb(2, ioaddr + RX_MODE); /* Use normal mode. 
*/ diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index c717b143f11..c516c199635 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -1475,7 +1475,7 @@ static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; #ifdef PCMCIA_DEBUG { @@ -1495,8 +1495,8 @@ static void set_multicast_list(struct net_device *dev) if (num_addrs > 0) { /* Calculate multicast logical address filter */ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); - netdev_for_each_mc_addr(dmi, dev) { - memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(adr, ha->addr, ETHER_ADDR_LEN); BuildLAF(lp->multicast_ladrf, adr); } } diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 776cad2f571..4c0368de181 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -32,7 +32,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/ptrace.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> @@ -1549,6 +1548,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), @@ -1740,7 +1740,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), - PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 5adc662c4bf..408f3d7b154 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev, { struct net_device *dev = priv; cisparse_t parse; + u8 *buf; if (pcmcia_parse_tuple(tuple, &parse)) return -EINVAL; - if ((parse.version_1.ns > 3) && - (cvt_ascii_address(dev, - (parse.version_1.str + parse.version_1.ofs[3])))) + buf = parse.version_1.str + parse.version_1.ofs[3]; + + if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) return 0; return -EINVAL; @@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x81, &buf); if (buf && len >= 13) { buf[12] = '\0'; - if (cvt_ascii_address(dev, buf)) + if (cvt_ascii_address(dev, buf) == 0) rc = 0; } kfree(buf); @@ -910,7 
+911,7 @@ static int smc91c92_config(struct pcmcia_device *link) if (i != 0) { printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); - goto config_undo; + goto config_failed; } smc->duplex = 0; @@ -998,6 +999,7 @@ config_undo: unregister_netdev(dev); config_failed: smc91c92_release(link); + free_netdev(dev); return -ENODEV; } /* smc91c92_config */ @@ -1606,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; struct smc_private *smc = netdev_priv(dev); - u_int multicast_table[ 2 ] = { 0, }; + unsigned char multicast_table[8]; unsigned long flags; u_short rx_cfg_setting; + int i; + + memset(multicast_table, 0, sizeof(multicast_table)); if (dev->flags & IFF_PROMISC) { rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; @@ -1616,14 +1621,10 @@ static void set_rx_mode(struct net_device *dev) rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; else { if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mc_addr; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_addr, dev) { - u_int position = ether_crc(6, mc_addr->dmi_addr); -#ifndef final_version /* Verify multicast address. */ - if ((mc_addr->dmi_addr[0] & 1) == 0) - continue; -#endif + netdev_for_each_mc_addr(ha, dev) { + u_int position = ether_crc(6, ha->addr); multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); } } @@ -1633,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev) /* Load MC table and Rx setting into the chip without interrupts. */ spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(3); - outl(multicast_table[0], ioaddr + MULTICAST0); - outl(multicast_table[1], ioaddr + MULTICAST4); + for (i = 0; i < 8; i++) + outb(multicast_table[i], ioaddr + MULTICAST0 + i); SMC_SELECT_BANK(0); outw(rx_cfg_setting, ioaddr + RCR); SMC_SELECT_BANK(2); diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 4d1802e457b..656be931207 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -1398,7 +1398,7 @@ static void set_addresses(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; local_info_t *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct set_address_info sa_info; int i; @@ -1413,10 +1413,10 @@ static void set_addresses(struct net_device *dev) set_address(&sa_info, dev->dev_addr); i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i++ == 9) break; - set_address(&sa_info, dmi->dmi_addr); + set_address(&sa_info, ha->addr); } while (i++ < 9) set_address(&sa_info, dev->dev_addr); diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 084d78dd163..a2254f749a9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -2590,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev) struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; volatile __le16 *mcast_table = (__le16 *)ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned long ioaddr = dev->base_addr; char *addrs; int i; @@ -2611,8 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev) ib->filter[1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 4fed95e8350..c1281567983 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void) module_init(bcm63xx_phy_init); module_exit(bcm63xx_phy_exit); + +static struct mdio_device_id bcm63xx_tbl[] = { + { 0x00406000, 0xfffffc00 }, + { 0x002bdc00, 0xfffffc00 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index f482fc4f8cf..cecdbbd549e 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -908,3 +908,19 @@ static void __exit broadcom_exit(void) module_init(broadcom_init); module_exit(broadcom_exit); + +static struct mdio_device_id broadcom_tbl[] = { + { 0x00206070, 0xfffffff0 }, + { 0x002060e0, 0xfffffff0 }, + { 0x002060c0, 0xfffffff0 }, + { 0x002060b0, 0xfffffff0 }, + { 0x0143bca0, 0xfffffff0 }, + { 0x0143bcb0, 0xfffffff0 }, + { PHY_ID_BCM50610, 0xfffffff0 }, + { PHY_ID_BCM50610M, 0xfffffff0 }, + { PHY_ID_BCM57780, 0xfffffff0 }, + { PHY_ID_BCMAC131, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, broadcom_tbl); diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index a1bd599c8a5..1a325d63756 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -159,3 +158,11 @@ static void __exit cicada_exit(void) module_init(cicada_init); module_exit(cicada_exit); + +static struct mdio_device_id cicada_tbl[] = { + { 0x000fc410, 0x000ffff0 }, + { 0x000fc440, 0x000fffc0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, cicada_tbl); diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index d926168bc78..29c17617a2e 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -219,3 +218,12 @@ static void __exit davicom_exit(void) module_init(davicom_init); module_exit(davicom_exit); + +static struct mdio_device_id davicom_tbl[] = { + { 0x0181b880, 0x0ffffff0 }, + { 0x0181b8a0, 0x0ffffff0 }, + { 0x00181b80, 0x0ffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, davicom_tbl); diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index b031fa21f1a..13995f52d6a 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -111,3 +110,10 @@ static void __exit et1011c_exit(void) module_init(et1011c_init); module_exit(et1011c_exit); + +static struct mdio_device_id et1011c_tbl[] = { + { 0x0282f014, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, et1011c_tbl); diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index e7070515d2e..1fa4d73c3cc 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -20,6 +20,7 @@ #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/err.h> +#include <linux/slab.h> #define MII_REGS_NUM 29 diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index af3f1f2a9f8..439adafeacb 100644 --- a/drivers/net/phy/icplus.c +++ 
b/drivers/net/phy/icplus.c @@ -13,7 +13,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -132,3 +131,10 @@ static void __exit ip175c_exit(void) module_init(ip175c_init); module_exit(ip175c_exit); + +static struct mdio_device_id icplus_tbl[] = { + { 0x02430d80, 0x0ffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, icplus_tbl); diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index 4cf3324ba16..8ee929b796d 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -174,3 +173,11 @@ static void __exit lxt_exit(void) module_init(lxt_init); module_exit(lxt_exit); + +static struct mdio_device_id lxt_tbl[] = { + { 0x78100000, 0xfffffff0 }, + { 0x001378e0, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, lxt_tbl); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 65ed385c2ce..78b74e83ce5 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -649,3 +648,16 @@ static void __exit marvell_exit(void) module_init(marvell_init); module_exit(marvell_exit); + +static struct mdio_device_id marvell_tbl[] = { + { 0x01410c60, 0xfffffff0 }, + { 0x01410c90, 0xfffffff0 }, + { 0x01410cc0, 0xfffffff0 }, + { 0x01410e10, 0xfffffff0 }, + { 0x01410cb0, 0xfffffff0 }, + { 0x01410cd0, 0xfffffff0 }, + { 0x01410e30, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, marvell_tbl); diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 2576055b350..65391891d8c 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -19,12 +19,16 @@ #include <linux/module.h> #include <linux/mdio-bitbang.h> -#include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> -#define MDIO_READ 1 -#define MDIO_WRITE 0 +#define MDIO_READ 2 +#define MDIO_WRITE 1 + +#define MDIO_C45 (1<<15) +#define MDIO_C45_ADDR (MDIO_C45 | 0) +#define MDIO_C45_READ (MDIO_C45 | 3) +#define MDIO_C45_WRITE (MDIO_C45 | 1) #define MDIO_SETUP_TIME 10 #define MDIO_HOLD_TIME 10 @@ -90,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) /* Utility to send the preamble, address, and * register (common to read and write). */ -static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) +static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) { const struct mdiobb_ops *ops = ctrl->ops; int i; @@ -109,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) for (i = 0; i < 32; i++) mdiobb_send_bit(ctrl, 1); - /* send the start bit (01) and the read opcode (10) or write (10) */ + /* send the start bit (01) and the read opcode (10) or write (10). 
+ Clause 45 operation uses 00 for the start and 11, 10 for + read/write */ mdiobb_send_bit(ctrl, 0); - mdiobb_send_bit(ctrl, 1); - mdiobb_send_bit(ctrl, read); - mdiobb_send_bit(ctrl, !read); + if (op & MDIO_C45) + mdiobb_send_bit(ctrl, 0); + else + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, (op >> 1) & 1); + mdiobb_send_bit(ctrl, (op >> 0) & 1); mdiobb_send_num(ctrl, phy, 5); mdiobb_send_num(ctrl, reg, 5); } +/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the + lower 16 bits of the 21 bit address. This transfer is done identically to a + MDIO_WRITE except for a different code. To enable clause 45 mode or + MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices + can exist on the same bus. Normal devices should ignore the MDIO_ADDR + phase. */ +static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) +{ + unsigned int dev_addr = (addr >> 16) & 0x1F; + unsigned int reg = addr & 0xFFFF; + mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); + + /* send the turnaround (10) */ + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, 0); + + mdiobb_send_num(ctrl, reg, 16); + + ctrl->ops->set_mdio_dir(ctrl, 0); + mdiobb_get_bit(ctrl); + + return dev_addr; +} static int mdiobb_read(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; - mdiobb_cmd(ctrl, MDIO_READ, phy, reg); + if (reg & MII_ADDR_C45) { + reg = mdiobb_cmd_addr(ctrl, phy, reg); + mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); + } else + mdiobb_cmd(ctrl, MDIO_READ, phy, reg); + ctrl->ops->set_mdio_dir(ctrl, 0); /* check the turnaround bit: the PHY should be driving it to zero */ @@ -148,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; - mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); + if (reg & MII_ADDR_C45) { + reg = mdiobb_cmd_addr(ctrl, phy, reg); + mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); + } else + mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index 61a4461cbda..a872aea4ed7 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -6,6 +6,7 @@ * Copyright (C) 2009 Cavium Networks */ +#include <linux/gfp.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index e17b70291bb..6a6b8199a0d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan); * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ -int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum) +int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) { int retval; @@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read); * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
*/ -int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val) +int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 6c636eb7208..729ab29ba28 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -153,3 +153,10 @@ MODULE_LICENSE("GPL"); module_init(ns_init); module_exit(ns_exit); + +static struct mdio_device_id ns_tbl[] = { + { DP83865_PHY_ID, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, ns_tbl); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 0295097d6c4..64be4664cca 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -19,7 +19,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index db1794546c5..1a99bb24410 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups); struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) { struct phy_device *dev; + /* We allocate the device, and initialize the * default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) mutex_init(&dev->lock); INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); + /* Request the appropriate module unconditionally; don't + bother trying to do so only if it isn't already loaded, + because that gets complicated. A hotplug event would have + done an unconditional modprobe anyway. + We don't do normal hotplug because it won't work for MDIO + -- because it relies on the device staying around for long + enough for the driver to get loaded. With MDIO, the NIC + driver will get bored and give up as soon as it finds that + there's no driver _already_ loaded. 
*/ + request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); + return dev; } EXPORT_SYMBOL(phy_device_create); diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index 23062d06723..6736b23f1b2 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> @@ -138,3 +137,10 @@ static void __exit qs6612_exit(void) module_init(qs6612_init); module_exit(qs6612_exit); + +static struct mdio_device_id qs6612_tbl[] = { + { 0x00181440, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, qs6612_tbl); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index a052a6744a5..f567c0e1aaa 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -78,3 +78,10 @@ static void __exit realtek_exit(void) module_init(realtek_init); module_exit(realtek_exit); + +static struct mdio_device_id realtek_tbl[] = { + { 0x001cc912, 0x001fffff }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, realtek_tbl); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index ed2644a5750..78fa988256f 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -253,3 +253,14 @@ MODULE_LICENSE("GPL"); module_init(smsc_init); module_exit(smsc_exit); + +static struct mdio_device_id smsc_tbl[] = { + { 0x0007c0a0, 0xfffffff0 }, + { 0x0007c0b0, 0xfffffff0 }, + { 0x0007c0c0, 0xfffffff0 }, + { 0x0007c0d0, 0xfffffff0 }, + { 0x0007c0f0, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, smsc_tbl); diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 6bdb0d53aaf..72290099e5e 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void) module_init(ste10Xp_init); module_exit(ste10Xp_exit); +static struct mdio_device_id ste10Xp_tbl[] = { + { STE101P_PHY_ID, 0xfffffff0 }, + { STE100P_PHY_ID, 0xffffffff }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl); + MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index dd3b2447e85..45cce50a279 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void) module_init(vsc82xx_init); module_exit(vsc82xx_exit); + +static struct mdio_device_id vitesse_tbl[] = { + { PHY_ID_VSC8244, 0x000fffc0 }, + { PHY_ID_VSC8221, 0x000ffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, vitesse_tbl); diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 3327e9fc7b5..9a2103a69e1 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -94,6 +94,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/string.h> +#include <linux/slab.h> #include <linux/if_ether.h> #include <linux/in.h> #include <linux/errno.h> diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6a375ea4947..6c2e8fa0ca3 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -31,6 +31,7 @@ #include <linux/spinlock.h> #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/slab.h> #include <asm/uaccess.h> #include <asm/string.h> diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6d61602208c..35f195329fd 100644 --- 
a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -46,6 +46,7 @@ #include <linux/stddef.h> #include <linux/device.h> #include <linux/mutex.h> +#include <linux/slab.h> #include <net/slhc_vj.h> #include <asm/atomic.h> @@ -2163,6 +2164,24 @@ int ppp_unit_number(struct ppp_channel *chan) } /* + * Return the PPP device interface name of a channel. + */ +char *ppp_dev_name(struct ppp_channel *chan) +{ + struct channel *pch = chan->ppp; + char *name = NULL; + + if (pch) { + read_lock_bh(&pch->upl); + if (pch->ppp && pch->ppp->dev) + name = pch->ppp->dev->name; + read_unlock_bh(&pch->upl); + } + return name; +} + + +/* * Disconnect a channel from the generic layer. * This must be called in process context. */ @@ -2890,6 +2909,7 @@ EXPORT_SYMBOL(ppp_register_channel); EXPORT_SYMBOL(ppp_unregister_channel); EXPORT_SYMBOL(ppp_channel_index); EXPORT_SYMBOL(ppp_unit_number); +EXPORT_SYMBOL(ppp_dev_name); EXPORT_SYMBOL(ppp_input); EXPORT_SYMBOL(ppp_input_error); EXPORT_SYMBOL(ppp_output_wakeup); diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 3a13cecae3e..52938da1e54 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -44,6 +44,7 @@ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/init.h> +#include <linux/slab.h> #include <asm/uaccess.h> #define PPP_VERSION "2.4.2" diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c deleted file mode 100644 index 9fbb2eba9a0..00000000000 --- a/drivers/net/pppol2tp.c +++ /dev/null @@ -1,2676 +0,0 @@ -/***************************************************************************** - * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets - * - * PPPoX --- Generic PPP encapsulation socket family - * PPPoL2TP --- PPP over L2TP (RFC 2661) - * - * Version: 1.0.0 - * - * Authors: Martijn van Oosterhout <kleptog@svana.org> - * James Chapman (jchapman@katalix.com) - * Contributors: - * Michal Ostrowski <mostrows@speakeasy.net> - * Arnaldo Carvalho de Melo <acme@xconectiva.com.br> - * David S. Miller (davem@redhat.com) - * - * License: - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -/* This driver handles only L2TP data frames; control frames are handled by a - * userspace application. - * - * To send data in an L2TP session, userspace opens a PPPoL2TP socket and - * attaches it to a bound UDP socket with local tunnel_id / session_id and - * peer tunnel_id / session_id set. Data can then be sent or received using - * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket - * can be read or modified using ioctl() or [gs]etsockopt() calls. - * - * When a PPPoL2TP socket is connected with local and peer session_id values - * zero, the socket is treated as a special tunnel management socket. 
- * - * Here's example userspace code to create a socket for sending/receiving data - * over an L2TP session:- - * - * struct sockaddr_pppol2tp sax; - * int fd; - * int session_fd; - * - * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); - * - * sax.sa_family = AF_PPPOX; - * sax.sa_protocol = PX_PROTO_OL2TP; - * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket - * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; - * sax.pppol2tp.addr.sin_port = addr->sin_port; - * sax.pppol2tp.addr.sin_family = AF_INET; - * sax.pppol2tp.s_tunnel = tunnel_id; - * sax.pppol2tp.s_session = session_id; - * sax.pppol2tp.d_tunnel = peer_tunnel_id; - * sax.pppol2tp.d_session = peer_session_id; - * - * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); - * - * A pppd plugin that allows PPP traffic to be carried over L2TP using - * this driver is available from the OpenL2TP project at - * http://openl2tp.sourceforge.net. - */ - -#include <linux/module.h> -#include <linux/string.h> -#include <linux/list.h> -#include <asm/uaccess.h> - -#include <linux/kernel.h> -#include <linux/spinlock.h> -#include <linux/kthread.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/jiffies.h> - -#include <linux/netdevice.h> -#include <linux/net.h> -#include <linux/inetdevice.h> -#include <linux/skbuff.h> -#include <linux/init.h> -#include <linux/ip.h> -#include <linux/udp.h> -#include <linux/if_pppox.h> -#include <linux/if_pppol2tp.h> -#include <net/sock.h> -#include <linux/ppp_channel.h> -#include <linux/ppp_defs.h> -#include <linux/if_ppp.h> -#include <linux/file.h> -#include <linux/hash.h> -#include <linux/sort.h> -#include <linux/proc_fs.h> -#include <linux/nsproxy.h> -#include <net/net_namespace.h> -#include <net/netns/generic.h> -#include <net/dst.h> -#include <net/ip.h> -#include <net/udp.h> -#include <net/xfrm.h> - -#include <asm/byteorder.h> -#include <asm/atomic.h> - - -#define PPPOL2TP_DRV_VERSION "V1.0" - -/* L2TP header constants */ -#define L2TP_HDRFLAG_T 0x8000 -#define L2TP_HDRFLAG_L 0x4000 -#define L2TP_HDRFLAG_S 0x0800 -#define L2TP_HDRFLAG_O 0x0200 -#define L2TP_HDRFLAG_P 0x0100 - -#define L2TP_HDR_VER_MASK 0x000F -#define L2TP_HDR_VER 0x0002 - -/* Space for UDP, L2TP and PPP headers */ -#define PPPOL2TP_HEADER_OVERHEAD 40 - -/* Just some random numbers */ -#define L2TP_TUNNEL_MAGIC 0x42114DDA -#define L2TP_SESSION_MAGIC 0x0C04EB7D - -#define PPPOL2TP_HASH_BITS 4 -#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS) - -/* Default trace flags */ -#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0 - -#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ - do { \ - if ((_mask) & (_type)) \ - printk(_lvl "PPPOL2TP: " _fmt, ##args); \ - } while(0) - -/* Number of bytes to build transmit L2TP headers. - * Unfortunately the size is different depending on whether sequence numbers - * are enabled. - */ -#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 -#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 - -struct pppol2tp_tunnel; - -/* Describes a session. It is the sk_user_data field in the PPPoL2TP - * socket. Contains information to determine incoming packets and transmit - * outgoing ones. 
- */ -struct pppol2tp_session -{ - int magic; /* should be - * L2TP_SESSION_MAGIC */ - int owner; /* pid that opened the socket */ - - struct sock *sock; /* Pointer to the session - * PPPoX socket */ - struct sock *tunnel_sock; /* Pointer to the tunnel UDP - * socket */ - - struct pppol2tp_addr tunnel_addr; /* Description of tunnel */ - - struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel - * context */ - - char name[20]; /* "sess xxxxx/yyyyy", where - * x=tunnel_id, y=session_id */ - int mtu; - int mru; - int flags; /* accessed by PPPIOCGFLAGS. - * Unused. */ - unsigned recv_seq:1; /* expect receive packets with - * sequence numbers? */ - unsigned send_seq:1; /* send packets with sequence - * numbers? */ - unsigned lns_mode:1; /* behave as LNS? LAC enables - * sequence numbers under - * control of LNS. */ - int debug; /* bitmask of debug message - * categories */ - int reorder_timeout; /* configured reorder timeout - * (in jiffies) */ - u16 nr; /* session NR state (receive) */ - u16 ns; /* session NR state (send) */ - struct sk_buff_head reorder_q; /* receive reorder queue */ - struct pppol2tp_ioc_stats stats; - struct hlist_node hlist; /* Hash list node */ -}; - -/* The sk_user_data field of the tunnel's UDP socket. It contains info to track - * all the associated sessions so incoming packets can be sorted out - */ -struct pppol2tp_tunnel -{ - int magic; /* Should be L2TP_TUNNEL_MAGIC */ - rwlock_t hlist_lock; /* protect session_hlist */ - struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE]; - /* hashed list of sessions, - * hashed by id */ - int debug; /* bitmask of debug message - * categories */ - char name[12]; /* "tunl xxxxx" */ - struct pppol2tp_ioc_stats stats; - - void (*old_sk_destruct)(struct sock *); - - struct sock *sock; /* Parent socket */ - struct list_head list; /* Keep a list of all open - * prepared sockets */ - struct net *pppol2tp_net; /* the net we belong to */ - - atomic_t ref_count; -}; - -/* Private data stored for received packets in the skb. - */ -struct pppol2tp_skb_cb { - u16 ns; - u16 nr; - u16 has_seq; - u16 length; - unsigned long expires; -}; - -#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) - -static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); -static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel); - -static atomic_t pppol2tp_tunnel_count; -static atomic_t pppol2tp_session_count; -static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; -static const struct proto_ops pppol2tp_ops; - -/* per-net private data for this module */ -static int pppol2tp_net_id __read_mostly; -struct pppol2tp_net { - struct list_head pppol2tp_tunnel_list; - rwlock_t pppol2tp_tunnel_list_lock; -}; - -static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net) -{ - BUG_ON(!net); - - return net_generic(net, pppol2tp_net_id); -} - -/* Helpers to obtain tunnel/session contexts from sockets. 
- */ -static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk) -{ - struct pppol2tp_session *session; - - if (sk == NULL) - return NULL; - - sock_hold(sk); - session = (struct pppol2tp_session *)(sk->sk_user_data); - if (session == NULL) { - sock_put(sk); - goto out; - } - - BUG_ON(session->magic != L2TP_SESSION_MAGIC); -out: - return session; -} - -static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk) -{ - struct pppol2tp_tunnel *tunnel; - - if (sk == NULL) - return NULL; - - sock_hold(sk); - tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); - if (tunnel == NULL) { - sock_put(sk); - goto out; - } - - BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); -out: - return tunnel; -} - -/* Tunnel reference counts. Incremented per session that is added to - * the tunnel. - */ -static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel) -{ - atomic_inc(&tunnel->ref_count); -} - -static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel) -{ - if (atomic_dec_and_test(&tunnel->ref_count)) - pppol2tp_tunnel_free(tunnel); -} - -/* Session hash list. - * The session_id SHOULD be random according to RFC2661, but several - * L2TP implementations (Cisco and Microsoft) use incrementing - * session_ids. So we do a real hash on the session_id, rather than a - * simple bitmask. - */ -static inline struct hlist_head * -pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id) -{ - unsigned long hash_val = (unsigned long) session_id; - return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)]; -} - -/* Lookup a session by id - */ -static struct pppol2tp_session * -pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id) -{ - struct hlist_head *session_list = - pppol2tp_session_id_hash(tunnel, session_id); - struct pppol2tp_session *session; - struct hlist_node *walk; - - read_lock_bh(&tunnel->hlist_lock); - hlist_for_each_entry(session, walk, session_list, hlist) { - if (session->tunnel_addr.s_session == session_id) { - read_unlock_bh(&tunnel->hlist_lock); - return session; - } - } - read_unlock_bh(&tunnel->hlist_lock); - - return NULL; -} - -/* Lookup a tunnel by id - */ -static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id) -{ - struct pppol2tp_tunnel *tunnel; - struct pppol2tp_net *pn = pppol2tp_pernet(net); - - read_lock_bh(&pn->pppol2tp_tunnel_list_lock); - list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) { - if (tunnel->stats.tunnel_id == tunnel_id) { - read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); - return tunnel; - } - } - read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); - - return NULL; -} - -/***************************************************************************** - * Receive data handling - *****************************************************************************/ - -/* Queue a skb in order. We come here only if the skb has an L2TP sequence - * number. 
- */ -static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb) -{ - struct sk_buff *skbp; - struct sk_buff *tmp; - u16 ns = PPPOL2TP_SKB_CB(skb)->ns; - - spin_lock_bh(&session->reorder_q.lock); - skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { - if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { - __skb_queue_before(&session->reorder_q, skbp, skb); - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", - session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns, - skb_queue_len(&session->reorder_q)); - session->stats.rx_oos_packets++; - goto out; - } - } - - __skb_queue_tail(&session->reorder_q, skb); - -out: - spin_unlock_bh(&session->reorder_q.lock); -} - -/* Dequeue a single skb. - */ -static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb) -{ - struct pppol2tp_tunnel *tunnel = session->tunnel; - int length = PPPOL2TP_SKB_CB(skb)->length; - struct sock *session_sock = NULL; - - /* We're about to requeue the skb, so return resources - * to its current owner (a socket receive buffer). - */ - skb_orphan(skb); - - tunnel->stats.rx_packets++; - tunnel->stats.rx_bytes += length; - session->stats.rx_packets++; - session->stats.rx_bytes += length; - - if (PPPOL2TP_SKB_CB(skb)->has_seq) { - /* Bump our Nr */ - session->nr++; - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated nr to %hu\n", session->name, session->nr); - } - - /* If the socket is bound, send it in to PPP's input queue. Otherwise - * queue it on the session socket. - */ - session_sock = session->sock; - if (session_sock->sk_state & PPPOX_BOUND) { - struct pppox_sock *po; - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: recv %d byte data frame, passing to ppp\n", - session->name, length); - - /* We need to forget all info related to the L2TP packet - * gathered in the skb as we are going to reuse the same - * skb for the inner packet. - * Namely we need to: - * - reset xfrm (IPSec) information as it applies to - * the outer L2TP packet and not to the inner one - * - release the dst to force a route lookup on the inner - * IP packet since skb->dst currently points to the dst - * of the UDP tunnel - * - reset netfilter information as it doesn't apply - * to the inner packet either - */ - secpath_reset(skb); - skb_dst_drop(skb); - nf_reset(skb); - - po = pppox_sk(session_sock); - ppp_input(&po->chan, skb); - } else { - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: socket not bound\n", session->name); - - /* Not bound. Nothing we can do, so discard. */ - session->stats.rx_errors++; - kfree_skb(skb); - } - - sock_put(session->sock); -} - -/* Dequeue skbs from the session's reorder_q, subject to packet order. - * Skbs that have been in the queue for too long are simply discarded. - */ -static void pppol2tp_recv_dequeue(struct pppol2tp_session *session) -{ - struct sk_buff *skb; - struct sk_buff *tmp; - - /* If the pkt at the head of the queue has the nr that we - * expect to send up next, dequeue it and any other - * in-sequence packets behind it. 
- */ - spin_lock_bh(&session->reorder_q.lock); - skb_queue_walk_safe(&session->reorder_q, skb, tmp) { - if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) { - session->stats.rx_seq_discards++; - session->stats.rx_errors++; - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %hu len %d discarded (too old), " - "waiting for %hu, reorder_q_len=%d\n", - session->name, PPPOL2TP_SKB_CB(skb)->ns, - PPPOL2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); - __skb_unlink(skb, &session->reorder_q); - kfree_skb(skb); - sock_put(session->sock); - continue; - } - - if (PPPOL2TP_SKB_CB(skb)->has_seq) { - if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: holding oos pkt %hu len %d, " - "waiting for %hu, reorder_q_len=%d\n", - session->name, PPPOL2TP_SKB_CB(skb)->ns, - PPPOL2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); - goto out; - } - } - __skb_unlink(skb, &session->reorder_q); - - /* Process the skb. We release the queue lock while we - * do so to let other contexts process the queue. - */ - spin_unlock_bh(&session->reorder_q.lock); - pppol2tp_recv_dequeue_skb(session, skb); - spin_lock_bh(&session->reorder_q.lock); - } - -out: - spin_unlock_bh(&session->reorder_q.lock); -} - -static inline int pppol2tp_verify_udp_checksum(struct sock *sk, - struct sk_buff *skb) -{ - struct udphdr *uh = udp_hdr(skb); - u16 ulen = ntohs(uh->len); - struct inet_sock *inet; - __wsum psum; - - if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) - return 0; - - inet = inet_sk(sk); - psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, - IPPROTO_UDP, 0); - - if ((skb->ip_summed == CHECKSUM_COMPLETE) && - !csum_fold(csum_add(psum, skb->csum))) - return 0; - - skb->csum = psum; - - return __skb_checksum_complete(skb); -} - -/* Internal receive frame. Do the real work of receiving an L2TP data frame - * here. The skb is not on a list when we get here. - * Returns 0 if the packet was a data packet and was successfully passed on. - * Returns 1 if the packet was not a good data packet and could not be - * forwarded. All such packets are passed up to userspace to deal with. - */ -static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) -{ - struct pppol2tp_session *session = NULL; - struct pppol2tp_tunnel *tunnel; - unsigned char *ptr, *optr; - u16 hdrflags; - u16 tunnel_id, session_id; - int length; - int offset; - - tunnel = pppol2tp_sock_to_tunnel(sock); - if (tunnel == NULL) - goto no_tunnel; - - if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb)) - goto discard_bad_csum; - - /* UDP always verifies the packet length. */ - __skb_pull(skb, sizeof(struct udphdr)); - - /* Short packet? */ - if (!pskb_may_pull(skb, 12)) { - PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); - goto error; - } - - /* Point to L2TP header */ - optr = ptr = skb->data; - - /* Get L2TP header flags */ - hdrflags = ntohs(*(__be16*)ptr); - - /* Trace packet contents, if enabled */ - if (tunnel->debug & PPPOL2TP_MSG_DATA) { - length = min(16u, skb->len); - if (!pskb_may_pull(skb, length)) - goto error; - - printk(KERN_DEBUG "%s: recv: ", tunnel->name); - - offset = 0; - do { - printk(" %02X", ptr[offset]); - } while (++offset < length); - - printk("\n"); - } - - /* Get length of L2TP packet */ - length = skb->len; - - /* If type is control packet, it is handled by userspace. 
*/ - if (hdrflags & L2TP_HDRFLAG_T) { - PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: recv control packet, len=%d\n", tunnel->name, length); - goto error; - } - - /* Skip flags */ - ptr += 2; - - /* If length is present, skip it */ - if (hdrflags & L2TP_HDRFLAG_L) - ptr += 2; - - /* Extract tunnel and session ID */ - tunnel_id = ntohs(*(__be16 *) ptr); - ptr += 2; - session_id = ntohs(*(__be16 *) ptr); - ptr += 2; - - /* Find the session context */ - session = pppol2tp_session_find(tunnel, session_id); - if (!session) { - /* Not found? Pass to userspace to deal with */ - PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: no socket found (%hu/%hu). Passing up.\n", - tunnel->name, tunnel_id, session_id); - goto error; - } - sock_hold(session->sock); - - /* The ref count on the socket was increased by the above call since - * we now hold a pointer to the session. Take care to do sock_put() - * when exiting this function from now on... - */ - - /* Handle the optional sequence numbers. If we are the LAC, - * enable/disable sequence numbers under the control of the LNS. If - * no sequence numbers present but we were expecting them, discard - * frame. - */ - if (hdrflags & L2TP_HDRFLAG_S) { - u16 ns, nr; - ns = ntohs(*(__be16 *) ptr); - ptr += 2; - nr = ntohs(*(__be16 *) ptr); - ptr += 2; - - /* Received a packet with sequence numbers. If we're the LNS, - * check if we sre sending sequence numbers and if not, - * configure it so. - */ - if ((!session->lns_mode) && (!session->send_seq)) { - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO, - "%s: requested to enable seq numbers by LNS\n", - session->name); - session->send_seq = -1; - } - - /* Store L2TP info in the skb */ - PPPOL2TP_SKB_CB(skb)->ns = ns; - PPPOL2TP_SKB_CB(skb)->nr = nr; - PPPOL2TP_SKB_CB(skb)->has_seq = 1; - - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n", - session->name, ns, nr, session->nr); - } else { - /* No sequence numbers. - * If user has configured mandatory sequence numbers, discard. - */ - if (session->recv_seq) { - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); - session->stats.rx_seq_discards++; - goto discard; - } - - /* If we're the LAC and we're sending sequence numbers, the - * LNS has requested that we no longer send sequence numbers. - * If we're the LNS and we're sending sequence numbers, the - * LAC is broken. Discard the frame. - */ - if ((!session->lns_mode) && (session->send_seq)) { - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO, - "%s: requested to disable seq numbers by LNS\n", - session->name); - session->send_seq = 0; - } else if (session->send_seq) { - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING, - "%s: recv data has no seq numbers when required. " - "Discarding\n", session->name); - session->stats.rx_seq_discards++; - goto discard; - } - - /* Store L2TP info in the skb */ - PPPOL2TP_SKB_CB(skb)->has_seq = 0; - } - - /* If offset bit set, skip it. */ - if (hdrflags & L2TP_HDRFLAG_O) { - offset = ntohs(*(__be16 *)ptr); - ptr += 2 + offset; - } - - offset = ptr - optr; - if (!pskb_may_pull(skb, offset)) - goto discard; - - __skb_pull(skb, offset); - - /* Skip PPP header, if present. In testing, Microsoft L2TP clients - * don't send the PPP header (PPP header compression enabled), but - * other clients can include the header. So we cope with both cases - * here. The PPP header is always FF03 when using L2TP. 
- * - * Note that skb->data[] isn't dereferenced from a u16 ptr here since - * the field may be unaligned. - */ - if (!pskb_may_pull(skb, 2)) - goto discard; - - if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) - skb_pull(skb, 2); - - /* Prepare skb for adding to the session's reorder_q. Hold - * packets for max reorder_timeout or 1 second if not - * reordering. - */ - PPPOL2TP_SKB_CB(skb)->length = length; - PPPOL2TP_SKB_CB(skb)->expires = jiffies + - (session->reorder_timeout ? session->reorder_timeout : HZ); - - /* Add packet to the session's receive queue. Reordering is done here, if - * enabled. Saved L2TP protocol info is stored in skb->sb[]. - */ - if (PPPOL2TP_SKB_CB(skb)->has_seq) { - if (session->reorder_timeout != 0) { - /* Packet reordering enabled. Add skb to session's - * reorder queue, in order of ns. - */ - pppol2tp_recv_queue_skb(session, skb); - } else { - /* Packet reordering disabled. Discard out-of-sequence - * packets - */ - if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { - session->stats.rx_seq_discards++; - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: oos pkt %hu len %d discarded, " - "waiting for %hu, reorder_q_len=%d\n", - session->name, PPPOL2TP_SKB_CB(skb)->ns, - PPPOL2TP_SKB_CB(skb)->length, session->nr, - skb_queue_len(&session->reorder_q)); - goto discard; - } - skb_queue_tail(&session->reorder_q, skb); - } - } else { - /* No sequence numbers. Add the skb to the tail of the - * reorder queue. This ensures that it will be - * delivered after all previous sequenced skbs. - */ - skb_queue_tail(&session->reorder_q, skb); - } - - /* Try to dequeue as many skbs from reorder_q as we can. */ - pppol2tp_recv_dequeue(session); - - return 0; - -discard: - session->stats.rx_errors++; - kfree_skb(skb); - sock_put(session->sock); - sock_put(sock); - - return 0; - -discard_bad_csum: - LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); - UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0); - tunnel->stats.rx_errors++; - kfree_skb(skb); - - return 0; - -error: - /* Put UDP header back */ - __skb_push(skb, sizeof(struct udphdr)); - sock_put(sock); - -no_tunnel: - return 1; -} - -/* UDP encapsulation receive handler. See net/ipv4/udp.c. - * Return codes: - * 0 : success. - * <0: error - * >0: skb should be passed up to userspace as UDP. - */ -static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) -{ - struct pppol2tp_tunnel *tunnel; - - tunnel = pppol2tp_sock_to_tunnel(sk); - if (tunnel == NULL) - goto pass_up; - - PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: received %d bytes\n", tunnel->name, skb->len); - - if (pppol2tp_recv_core(sk, skb)) - goto pass_up_put; - - sock_put(sk); - return 0; - -pass_up_put: - sock_put(sk); -pass_up: - return 1; -} - -/* Receive message. This is the recvmsg for the PPPoL2TP socket. 
- */ -static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, - int flags) -{ - int err; - struct sk_buff *skb; - struct sock *sk = sock->sk; - - err = -EIO; - if (sk->sk_state & PPPOX_BOUND) - goto end; - - msg->msg_namelen = 0; - - err = 0; - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &err); - if (!skb) - goto end; - - if (len > skb->len) - len = skb->len; - else if (len < skb->len) - msg->msg_flags |= MSG_TRUNC; - - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); - if (likely(err == 0)) - err = len; - - kfree_skb(skb); -end: - return err; -} - -/************************************************************************ - * Transmit handling - ***********************************************************************/ - -/* Tell how big L2TP headers are for a particular session. This - * depends on whether sequence numbers are being used. - */ -static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session) -{ - if (session->send_seq) - return PPPOL2TP_L2TP_HDR_SIZE_SEQ; - - return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; -} - -/* Build an L2TP header for the session into the buffer provided. - */ -static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session, - void *buf) -{ - __be16 *bufp = buf; - u16 flags = L2TP_HDR_VER; - - if (session->send_seq) - flags |= L2TP_HDRFLAG_S; - - /* Setup L2TP header. - * FIXME: Can this ever be unaligned? Is direct dereferencing of - * 16-bit header fields safe here for all architectures? - */ - *bufp++ = htons(flags); - *bufp++ = htons(session->tunnel_addr.d_tunnel); - *bufp++ = htons(session->tunnel_addr.d_session); - if (session->send_seq) { - *bufp++ = htons(session->ns); - *bufp++ = 0; - session->ns++; - PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, - "%s: updated ns to %hu\n", session->name, session->ns); - } -} - -/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here - * when a user application does a sendmsg() on the session socket. L2TP and - * PPP headers must be inserted into the user's data. - */ -static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, - size_t total_len) -{ - static const unsigned char ppph[2] = { 0xff, 0x03 }; - struct sock *sk = sock->sk; - struct inet_sock *inet; - __wsum csum; - struct sk_buff *skb; - int error; - int hdr_len; - struct pppol2tp_session *session; - struct pppol2tp_tunnel *tunnel; - struct udphdr *uh; - unsigned int len; - struct sock *sk_tun; - u16 udp_len; - - error = -ENOTCONN; - if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) - goto error; - - /* Get session and tunnel contexts */ - error = -EBADF; - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto error; - - sk_tun = session->tunnel_sock; - tunnel = pppol2tp_sock_to_tunnel(sk_tun); - if (tunnel == NULL) - goto error_put_sess; - - /* What header length is configured for this session? */ - hdr_len = pppol2tp_l2tp_header_len(session); - - /* Allocate a socket buffer */ - error = -ENOMEM; - skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + - sizeof(struct udphdr) + hdr_len + - sizeof(ppph) + total_len, - 0, GFP_KERNEL); - if (!skb) - goto error_put_sess_tun; - - /* Reserve space for headers. 
*/ - skb_reserve(skb, NET_SKB_PAD); - skb_reset_network_header(skb); - skb_reserve(skb, sizeof(struct iphdr)); - skb_reset_transport_header(skb); - - /* Build UDP header */ - inet = inet_sk(sk_tun); - udp_len = hdr_len + sizeof(ppph) + total_len; - uh = (struct udphdr *) skb->data; - uh->source = inet->inet_sport; - uh->dest = inet->inet_dport; - uh->len = htons(udp_len); - uh->check = 0; - skb_put(skb, sizeof(struct udphdr)); - - /* Build L2TP header */ - pppol2tp_build_l2tp_header(session, skb->data); - skb_put(skb, hdr_len); - - /* Add PPP header */ - skb->data[0] = ppph[0]; - skb->data[1] = ppph[1]; - skb_put(skb, 2); - - /* Copy user data into skb */ - error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); - if (error < 0) { - kfree_skb(skb); - goto error_put_sess_tun; - } - skb_put(skb, total_len); - - /* Calculate UDP checksum if configured to do so */ - if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) - skb->ip_summed = CHECKSUM_NONE; - else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { - skb->ip_summed = CHECKSUM_COMPLETE; - csum = skb_checksum(skb, 0, udp_len, 0); - uh->check = csum_tcpudp_magic(inet->inet_saddr, - inet->inet_daddr, - udp_len, IPPROTO_UDP, csum); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - } else { - skb->ip_summed = CHECKSUM_PARTIAL; - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - uh->check = ~csum_tcpudp_magic(inet->inet_saddr, - inet->inet_daddr, - udp_len, IPPROTO_UDP, 0); - } - - /* Debug */ - if (session->send_seq) - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes, ns=%hu\n", session->name, - total_len, session->ns - 1); - else - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: send %Zd bytes\n", session->name, total_len); - - if (session->debug & PPPOL2TP_MSG_DATA) { - int i; - unsigned char *datap = skb->data; - - printk(KERN_DEBUG "%s: xmit:", session->name); - for (i = 0; i < total_len; i++) { - printk(" %02X", *datap++); - if (i == 15) { - printk(" ..."); - break; - } - } - printk("\n"); - } - - /* Queue the packet to IP for output */ - len = skb->len; - error = ip_queue_xmit(skb, 1); - - /* Update stats */ - if (error >= 0) { - tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += len; - session->stats.tx_packets++; - session->stats.tx_bytes += len; - } else { - tunnel->stats.tx_errors++; - session->stats.tx_errors++; - } - - return error; - -error_put_sess_tun: - sock_put(session->tunnel_sock); -error_put_sess: - sock_put(sk); -error: - return error; -} - -/* Automatically called when the skb is freed. - */ -static void pppol2tp_sock_wfree(struct sk_buff *skb) -{ - sock_put(skb->sk); -} - -/* For data skbs that we transmit, we associate with the tunnel socket - * but don't do accounting. - */ -static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) -{ - sock_hold(sk); - skb->sk = sk; - skb->destructor = pppol2tp_sock_wfree; -} - -/* Transmit function called by generic PPP driver. Sends PPP frame - * over PPPoL2TP socket. - * - * This is almost the same as pppol2tp_sendmsg(), but rather than - * being called with a msghdr from userspace, it is called with a skb - * from the kernel. - * - * The supplied skb from ppp doesn't have enough headroom for the - * insertion of L2TP, UDP and IP headers so we need to allocate more - * headroom in the skb. This will create a cloned skb. 
But we must be - * careful in the error case because the caller will expect to free - * the skb it supplied, not our cloned skb. So we take care to always - * leave the original skb unfreed if we return an error. - */ -static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) -{ - static const u8 ppph[2] = { 0xff, 0x03 }; - struct sock *sk = (struct sock *) chan->private; - struct sock *sk_tun; - int hdr_len; - u16 udp_len; - struct pppol2tp_session *session; - struct pppol2tp_tunnel *tunnel; - int rc; - int headroom; - int data_len = skb->len; - struct inet_sock *inet; - __wsum csum; - struct udphdr *uh; - unsigned int len; - int old_headroom; - int new_headroom; - - if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) - goto abort; - - /* Get session and tunnel contexts from the socket */ - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto abort; - - sk_tun = session->tunnel_sock; - if (sk_tun == NULL) - goto abort_put_sess; - tunnel = pppol2tp_sock_to_tunnel(sk_tun); - if (tunnel == NULL) - goto abort_put_sess; - - /* What header length is configured for this session? */ - hdr_len = pppol2tp_l2tp_header_len(session); - - /* Check that there's enough headroom in the skb to insert IP, - * UDP and L2TP and PPP headers. If not enough, expand it to - * make room. Adjust truesize. - */ - headroom = NET_SKB_PAD + sizeof(struct iphdr) + - sizeof(struct udphdr) + hdr_len + sizeof(ppph); - old_headroom = skb_headroom(skb); - if (skb_cow_head(skb, headroom)) - goto abort_put_sess_tun; - - new_headroom = skb_headroom(skb); - skb_orphan(skb); - skb->truesize += new_headroom - old_headroom; - - /* Setup PPP header */ - __skb_push(skb, sizeof(ppph)); - skb->data[0] = ppph[0]; - skb->data[1] = ppph[1]; - - /* Setup L2TP header */ - pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len)); - - udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len; - - /* Setup UDP header */ - inet = inet_sk(sk_tun); - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - uh->source = inet->inet_sport; - uh->dest = inet->inet_dport; - uh->len = htons(udp_len); - uh->check = 0; - - /* Debug */ - if (session->send_seq) - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: send %d bytes, ns=%hu\n", session->name, - data_len, session->ns - 1); - else - PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, - "%s: send %d bytes\n", session->name, data_len); - - if (session->debug & PPPOL2TP_MSG_DATA) { - int i; - unsigned char *datap = skb->data; - - printk(KERN_DEBUG "%s: xmit:", session->name); - for (i = 0; i < data_len; i++) { - printk(" %02X", *datap++); - if (i == 31) { - printk(" ..."); - break; - } - } - printk("\n"); - } - - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | - IPSKB_REROUTED); - nf_reset(skb); - - /* Get routing info from the tunnel socket */ - skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun))); - pppol2tp_skb_set_owner_w(skb, sk_tun); - - /* Calculate UDP checksum if configured to do so */ - if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) - skb->ip_summed = CHECKSUM_NONE; - else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { - skb->ip_summed = CHECKSUM_COMPLETE; - csum = skb_checksum(skb, 0, udp_len, 0); - uh->check = csum_tcpudp_magic(inet->inet_saddr, - inet->inet_daddr, - udp_len, IPPROTO_UDP, csum); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - } else { - skb->ip_summed = 
CHECKSUM_PARTIAL; - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - uh->check = ~csum_tcpudp_magic(inet->inet_saddr, - inet->inet_daddr, - udp_len, IPPROTO_UDP, 0); - } - - /* Queue the packet to IP for output */ - len = skb->len; - rc = ip_queue_xmit(skb, 1); - - /* Update stats */ - if (rc >= 0) { - tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += len; - session->stats.tx_packets++; - session->stats.tx_bytes += len; - } else { - tunnel->stats.tx_errors++; - session->stats.tx_errors++; - } - - sock_put(sk_tun); - sock_put(sk); - return 1; - -abort_put_sess_tun: - sock_put(sk_tun); -abort_put_sess: - sock_put(sk); -abort: - /* Free the original skb */ - kfree_skb(skb); - return 1; -} - -/***************************************************************************** - * Session (and tunnel control) socket create/destroy. - *****************************************************************************/ - -/* When the tunnel UDP socket is closed, all the attached sockets need to go - * too. - */ -static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel) -{ - int hash; - struct hlist_node *walk; - struct hlist_node *tmp; - struct pppol2tp_session *session; - struct sock *sk; - - BUG_ON(tunnel == NULL); - - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: closing all sessions...\n", tunnel->name); - - write_lock_bh(&tunnel->hlist_lock); - for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { -again: - hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { - struct sk_buff *skb; - - session = hlist_entry(walk, struct pppol2tp_session, hlist); - - sk = session->sock; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: closing session\n", session->name); - - hlist_del_init(&session->hlist); - - /* Since we should hold the sock lock while - * doing any unbinding, we need to release the - * lock we're holding before taking that lock. - * Hold a reference to the sock so it doesn't - * disappear as we're jumping between locks. - */ - sock_hold(sk); - write_unlock_bh(&tunnel->hlist_lock); - lock_sock(sk); - - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { - pppox_unbind_sock(sk); - sk->sk_state = PPPOX_DEAD; - sk->sk_state_change(sk); - } - - /* Purge any queued data */ - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - while ((skb = skb_dequeue(&session->reorder_q))) { - kfree_skb(skb); - sock_put(sk); - } - - release_sock(sk); - sock_put(sk); - - /* Now restart from the beginning of this hash - * chain. We always remove a session from the - * list so we are guaranteed to make forward - * progress. - */ - write_lock_bh(&tunnel->hlist_lock); - goto again; - } - } - write_unlock_bh(&tunnel->hlist_lock); -} - -/* Really kill the tunnel. - * Come here only when all sessions have been cleared from the tunnel. - */ -static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) -{ - struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net); - - /* Remove from socket list */ - write_lock_bh(&pn->pppol2tp_tunnel_list_lock); - list_del_init(&tunnel->list); - write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); - - atomic_dec(&pppol2tp_tunnel_count); - kfree(tunnel); -} - -/* Tunnel UDP socket destruct hook. - * The tunnel context is deleted only when all session sockets have been - * closed. 
- */ -static void pppol2tp_tunnel_destruct(struct sock *sk) -{ - struct pppol2tp_tunnel *tunnel; - - tunnel = sk->sk_user_data; - if (tunnel == NULL) - goto end; - - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: closing...\n", tunnel->name); - - /* Close all sessions */ - pppol2tp_tunnel_closeall(tunnel); - - /* No longer an encapsulation socket. See net/ipv4/udp.c */ - (udp_sk(sk))->encap_type = 0; - (udp_sk(sk))->encap_rcv = NULL; - - /* Remove hooks into tunnel socket */ - tunnel->sock = NULL; - sk->sk_destruct = tunnel->old_sk_destruct; - sk->sk_user_data = NULL; - - /* Call original (UDP) socket descructor */ - if (sk->sk_destruct != NULL) - (*sk->sk_destruct)(sk); - - pppol2tp_tunnel_dec_refcount(tunnel); - -end: - return; -} - -/* Really kill the session socket. (Called from sock_put() if - * refcnt == 0.) - */ -static void pppol2tp_session_destruct(struct sock *sk) -{ - struct pppol2tp_session *session = NULL; - - if (sk->sk_user_data != NULL) { - struct pppol2tp_tunnel *tunnel; - - session = sk->sk_user_data; - if (session == NULL) - goto out; - - BUG_ON(session->magic != L2TP_SESSION_MAGIC); - - /* Don't use pppol2tp_sock_to_tunnel() here to - * get the tunnel context because the tunnel - * socket might have already been closed (its - * sk->sk_user_data will be NULL) so use the - * session's private tunnel ptr instead. - */ - tunnel = session->tunnel; - if (tunnel != NULL) { - BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); - - /* If session_id is zero, this is a null - * session context, which was created for a - * socket that is being used only to manage - * tunnels. - */ - if (session->tunnel_addr.s_session != 0) { - /* Delete the session socket from the - * hash - */ - write_lock_bh(&tunnel->hlist_lock); - hlist_del_init(&session->hlist); - write_unlock_bh(&tunnel->hlist_lock); - - atomic_dec(&pppol2tp_session_count); - } - - /* This will delete the tunnel context if this - * is the last session on the tunnel. - */ - session->tunnel = NULL; - session->tunnel_sock = NULL; - pppol2tp_tunnel_dec_refcount(tunnel); - } - } - - kfree(session); -out: - return; -} - -/* Called when the PPPoX socket (session) is closed. - */ -static int pppol2tp_release(struct socket *sock) -{ - struct sock *sk = sock->sk; - struct pppol2tp_session *session; - int error; - - if (!sk) - return 0; - - error = -EBADF; - lock_sock(sk); - if (sock_flag(sk, SOCK_DEAD) != 0) - goto error; - - pppox_unbind_sock(sk); - - /* Signal the death of the socket. */ - sk->sk_state = PPPOX_DEAD; - sock_orphan(sk); - sock->sk = NULL; - - session = pppol2tp_sock_to_session(sk); - - /* Purge any queued data */ - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - if (session != NULL) { - struct sk_buff *skb; - while ((skb = skb_dequeue(&session->reorder_q))) { - kfree_skb(skb); - sock_put(sk); - } - sock_put(sk); - } - - release_sock(sk); - - /* This will delete the session context via - * pppol2tp_session_destruct() if the socket's refcnt drops to - * zero. - */ - sock_put(sk); - - return 0; - -error: - release_sock(sk); - return error; -} - -/* Internal function to prepare a tunnel (UDP) socket to have PPPoX - * sockets attached to it. 
- */ -static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net, - int fd, u16 tunnel_id, int *error) -{ - int err; - struct socket *sock = NULL; - struct sock *sk; - struct pppol2tp_tunnel *tunnel; - struct pppol2tp_net *pn; - struct sock *ret = NULL; - - /* Get the tunnel UDP socket from the fd, which was opened by - * the userspace L2TP daemon. - */ - err = -EBADF; - sock = sockfd_lookup(fd, &err); - if (!sock) { - PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, - "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", - tunnel_id, fd, err); - goto err; - } - - sk = sock->sk; - - /* Quick sanity checks */ - err = -EPROTONOSUPPORT; - if (sk->sk_protocol != IPPROTO_UDP) { - PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, - "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", - tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); - goto err; - } - err = -EAFNOSUPPORT; - if (sock->ops->family != AF_INET) { - PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, - "tunl %hu: fd %d wrong family, got %d, expected %d\n", - tunnel_id, fd, sock->ops->family, AF_INET); - goto err; - } - - err = -ENOTCONN; - - /* Check if this socket has already been prepped */ - tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data; - if (tunnel != NULL) { - /* User-data field already set */ - err = -EBUSY; - BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); - - /* This socket has already been prepped */ - ret = tunnel->sock; - goto out; - } - - /* This socket is available and needs prepping. Create a new tunnel - * context and init it. - */ - sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL); - if (sk->sk_user_data == NULL) { - err = -ENOMEM; - goto err; - } - - tunnel->magic = L2TP_TUNNEL_MAGIC; - sprintf(&tunnel->name[0], "tunl %hu", tunnel_id); - - tunnel->stats.tunnel_id = tunnel_id; - tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS; - - /* Hook on the tunnel socket destructor so that we can cleanup - * if the tunnel socket goes away. - */ - tunnel->old_sk_destruct = sk->sk_destruct; - sk->sk_destruct = pppol2tp_tunnel_destruct; - - tunnel->sock = sk; - sk->sk_allocation = GFP_ATOMIC; - - /* Misc init */ - rwlock_init(&tunnel->hlist_lock); - - /* The net we belong to */ - tunnel->pppol2tp_net = net; - pn = pppol2tp_pernet(net); - - /* Add tunnel to our list */ - INIT_LIST_HEAD(&tunnel->list); - write_lock_bh(&pn->pppol2tp_tunnel_list_lock); - list_add(&tunnel->list, &pn->pppol2tp_tunnel_list); - write_unlock_bh(&pn->pppol2tp_tunnel_list_lock); - atomic_inc(&pppol2tp_tunnel_count); - - /* Bump the reference count. The tunnel context is deleted - * only when this drops to zero. - */ - pppol2tp_tunnel_inc_refcount(tunnel); - - /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ - (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP; - (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv; - - ret = tunnel->sock; - - *error = 0; -out: - if (sock) - sockfd_put(sock); - - return ret; - -err: - *error = err; - goto out; -} - -static struct proto pppol2tp_sk_proto = { - .name = "PPPOL2TP", - .owner = THIS_MODULE, - .obj_size = sizeof(struct pppox_sock), -}; - -/* socket() handler. Initialize a new struct sock. 
- */ -static int pppol2tp_create(struct net *net, struct socket *sock) -{ - int error = -ENOMEM; - struct sock *sk; - - sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); - if (!sk) - goto out; - - sock_init_data(sock, sk); - - sock->state = SS_UNCONNECTED; - sock->ops = &pppol2tp_ops; - - sk->sk_backlog_rcv = pppol2tp_recv_core; - sk->sk_protocol = PX_PROTO_OL2TP; - sk->sk_family = PF_PPPOX; - sk->sk_state = PPPOX_NONE; - sk->sk_type = SOCK_STREAM; - sk->sk_destruct = pppol2tp_session_destruct; - - error = 0; - -out: - return error; -} - -/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket - */ -static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, - int sockaddr_len, int flags) -{ - struct sock *sk = sock->sk; - struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; - struct pppox_sock *po = pppox_sk(sk); - struct sock *tunnel_sock = NULL; - struct pppol2tp_session *session = NULL; - struct pppol2tp_tunnel *tunnel; - struct dst_entry *dst; - int error = 0; - - lock_sock(sk); - - error = -EINVAL; - if (sp->sa_protocol != PX_PROTO_OL2TP) - goto end; - - /* Check for already bound sockets */ - error = -EBUSY; - if (sk->sk_state & PPPOX_CONNECTED) - goto end; - - /* We don't supporting rebinding anyway */ - error = -EALREADY; - if (sk->sk_user_data) - goto end; /* socket is already attached */ - - /* Don't bind if s_tunnel is 0 */ - error = -EINVAL; - if (sp->pppol2tp.s_tunnel == 0) - goto end; - - /* Special case: prepare tunnel socket if s_session and - * d_session is 0. Otherwise look up tunnel using supplied - * tunnel id. - */ - if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { - tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk), - sp->pppol2tp.fd, - sp->pppol2tp.s_tunnel, - &error); - if (tunnel_sock == NULL) - goto end; - - tunnel = tunnel_sock->sk_user_data; - } else { - tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel); - - /* Error if we can't find the tunnel */ - error = -ENOENT; - if (tunnel == NULL) - goto end; - - tunnel_sock = tunnel->sock; - } - - /* Check that this session doesn't already exist */ - error = -EEXIST; - session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session); - if (session != NULL) - goto end; - - /* Allocate and initialize a new session context. */ - session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL); - if (session == NULL) { - error = -ENOMEM; - goto end; - } - - skb_queue_head_init(&session->reorder_q); - - session->magic = L2TP_SESSION_MAGIC; - session->owner = current->pid; - session->sock = sk; - session->tunnel = tunnel; - session->tunnel_sock = tunnel_sock; - session->tunnel_addr = sp->pppol2tp; - sprintf(&session->name[0], "sess %hu/%hu", - session->tunnel_addr.s_tunnel, - session->tunnel_addr.s_session); - - session->stats.tunnel_id = session->tunnel_addr.s_tunnel; - session->stats.session_id = session->tunnel_addr.s_session; - - INIT_HLIST_NODE(&session->hlist); - - /* Inherit debug options from tunnel */ - session->debug = tunnel->debug; - - /* Default MTU must allow space for UDP/L2TP/PPP - * headers. 
- */ - session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; - - /* If PMTU discovery was enabled, use the MTU that was discovered */ - dst = sk_dst_get(sk); - if (dst != NULL) { - u32 pmtu = dst_mtu(__sk_dst_get(sk)); - if (pmtu != 0) - session->mtu = session->mru = pmtu - - PPPOL2TP_HEADER_OVERHEAD; - dst_release(dst); - } - - /* Special case: if source & dest session_id == 0x0000, this socket is - * being created to manage the tunnel. Don't add the session to the - * session hash list, just set up the internal context for use by - * ioctl() and sockopt() handlers. - */ - if ((session->tunnel_addr.s_session == 0) && - (session->tunnel_addr.d_session == 0)) { - error = 0; - sk->sk_user_data = session; - goto out_no_ppp; - } - - /* Get tunnel context from the tunnel socket */ - tunnel = pppol2tp_sock_to_tunnel(tunnel_sock); - if (tunnel == NULL) { - error = -EBADF; - goto end; - } - - /* Right now, because we don't have a way to push the incoming skb's - * straight through the UDP layer, the only header we need to worry - * about is the L2TP header. This size is different depending on - * whether sequence numbers are enabled for the data channel. - */ - po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; - - po->chan.private = sk; - po->chan.ops = &pppol2tp_chan_ops; - po->chan.mtu = session->mtu; - - error = ppp_register_net_channel(sock_net(sk), &po->chan); - if (error) - goto end_put_tun; - - /* This is how we get the session context from the socket. */ - sk->sk_user_data = session; - - /* Add session to the tunnel's hash list */ - write_lock_bh(&tunnel->hlist_lock); - hlist_add_head(&session->hlist, - pppol2tp_session_id_hash(tunnel, - session->tunnel_addr.s_session)); - write_unlock_bh(&tunnel->hlist_lock); - - atomic_inc(&pppol2tp_session_count); - -out_no_ppp: - pppol2tp_tunnel_inc_refcount(tunnel); - sk->sk_state = PPPOX_CONNECTED; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: created\n", session->name); - -end_put_tun: - sock_put(tunnel_sock); -end: - release_sock(sk); - - if (error != 0) { - if (session) - PRINTK(session->debug, - PPPOL2TP_MSG_CONTROL, KERN_WARNING, - "%s: connect failed: %d\n", - session->name, error); - else - PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, - "connect failed: %d\n", error); - } - - return error; -} - -/* getname() support. - */ -static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, - int *usockaddr_len, int peer) -{ - int len = sizeof(struct sockaddr_pppol2tp); - struct sockaddr_pppol2tp sp; - int error = 0; - struct pppol2tp_session *session; - - error = -ENOTCONN; - if (sock->sk->sk_state != PPPOX_CONNECTED) - goto end; - - session = pppol2tp_sock_to_session(sock->sk); - if (session == NULL) { - error = -EBADF; - goto end; - } - - sp.sa_family = AF_PPPOX; - sp.sa_protocol = PX_PROTO_OL2TP; - memcpy(&sp.pppol2tp, &session->tunnel_addr, - sizeof(struct pppol2tp_addr)); - - memcpy(uaddr, &sp, len); - - *usockaddr_len = len; - - error = 0; - sock_put(sock->sk); - -end: - return error; -} - -/**************************************************************************** - * ioctl() handlers. - * - * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP - * sockets. However, in order to control kernel tunnel features, we allow - * userspace to create a special "tunnel" PPPoX socket which is used for - * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow - * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() - * calls. 
- ****************************************************************************/ - -/* Session ioctl helper. - */ -static int pppol2tp_session_ioctl(struct pppol2tp_session *session, - unsigned int cmd, unsigned long arg) -{ - struct ifreq ifr; - int err = 0; - struct sock *sk = session->sock; - int val = (int) arg; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", - session->name, cmd, arg); - - sock_hold(sk); - - switch (cmd) { - case SIOCGIFMTU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) - break; - ifr.ifr_mtu = session->mtu; - if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) - break; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mtu=%d\n", session->name, session->mtu); - err = 0; - break; - - case SIOCSIFMTU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) - break; - - session->mtu = ifr.ifr_mtu; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mtu=%d\n", session->name, session->mtu); - err = 0; - break; - - case PPPIOCGMRU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (put_user(session->mru, (int __user *) arg)) - break; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get mru=%d\n", session->name, session->mru); - err = 0; - break; - - case PPPIOCSMRU: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - err = -EFAULT; - if (get_user(val,(int __user *) arg)) - break; - - session->mru = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set mru=%d\n", session->name, session->mru); - err = 0; - break; - - case PPPIOCGFLAGS: - err = -EFAULT; - if (put_user(session->flags, (int __user *) arg)) - break; - - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get flags=%d\n", session->name, session->flags); - err = 0; - break; - - case PPPIOCSFLAGS: - err = -EFAULT; - if (get_user(val, (int __user *) arg)) - break; - session->flags = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set flags=%d\n", session->name, session->flags); - err = 0; - break; - - case PPPIOCGL2TPSTATS: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - if (copy_to_user((void __user *) arg, &session->stats, - sizeof(session->stats))) - break; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", session->name); - err = 0; - break; - - default: - err = -ENOSYS; - break; - } - - sock_put(sk); - - return err; -} - -/* Tunnel ioctl helper. - * - * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data - * specifies a session_id, the session ioctl handler is called. This allows an - * application to retrieve session stats via a tunnel socket. 
- */ -static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel, - unsigned int cmd, unsigned long arg) -{ - int err = 0; - struct sock *sk = tunnel->sock; - struct pppol2tp_ioc_stats stats_req; - - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, - "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name, - cmd, arg); - - sock_hold(sk); - - switch (cmd) { - case PPPIOCGL2TPSTATS: - err = -ENXIO; - if (!(sk->sk_state & PPPOX_CONNECTED)) - break; - - if (copy_from_user(&stats_req, (void __user *) arg, - sizeof(stats_req))) { - err = -EFAULT; - break; - } - if (stats_req.session_id != 0) { - /* resend to session ioctl handler */ - struct pppol2tp_session *session = - pppol2tp_session_find(tunnel, stats_req.session_id); - if (session != NULL) - err = pppol2tp_session_ioctl(session, cmd, arg); - else - err = -EBADR; - break; - } -#ifdef CONFIG_XFRM - tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0; -#endif - if (copy_to_user((void __user *) arg, &tunnel->stats, - sizeof(tunnel->stats))) { - err = -EFAULT; - break; - } - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get L2TP stats\n", tunnel->name); - err = 0; - break; - - default: - err = -ENOSYS; - break; - } - - sock_put(sk); - - return err; -} - -/* Main ioctl() handler. - * Dispatch to tunnel or session helpers depending on the socket. - */ -static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) -{ - struct sock *sk = sock->sk; - struct pppol2tp_session *session; - struct pppol2tp_tunnel *tunnel; - int err; - - if (!sk) - return 0; - - err = -EBADF; - if (sock_flag(sk, SOCK_DEAD) != 0) - goto end; - - err = -ENOTCONN; - if ((sk->sk_user_data == NULL) || - (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)))) - goto end; - - /* Get session context from the socket */ - err = -EBADF; - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto end; - - /* Special case: if session's session_id is zero, treat ioctl as a - * tunnel ioctl - */ - if ((session->tunnel_addr.s_session == 0) && - (session->tunnel_addr.d_session == 0)) { - err = -EBADF; - tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); - if (tunnel == NULL) - goto end_put_sess; - - err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); - sock_put(session->tunnel_sock); - goto end_put_sess; - } - - err = pppol2tp_session_ioctl(session, cmd, arg); - -end_put_sess: - sock_put(sk); -end: - return err; -} - -/***************************************************************************** - * setsockopt() / getsockopt() support. - * - * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP - * sockets. In order to control kernel tunnel features, we allow userspace to - * create a special "tunnel" PPPoX socket which is used for control only. - * Tunnel PPPoX sockets have session_id == 0 and simply allow the user - * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls. - *****************************************************************************/ - -/* Tunnel setsockopt() helper. - */ -static int pppol2tp_tunnel_setsockopt(struct sock *sk, - struct pppol2tp_tunnel *tunnel, - int optname, int val) -{ - int err = 0; - - switch (optname) { - case PPPOL2TP_SO_DEBUG: - tunnel->debug = val; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", tunnel->name, tunnel->debug); - break; - - default: - err = -ENOPROTOOPT; - break; - } - - return err; -} - -/* Session setsockopt helper. 
- */ -static int pppol2tp_session_setsockopt(struct sock *sk, - struct pppol2tp_session *session, - int optname, int val) -{ - int err = 0; - - switch (optname) { - case PPPOL2TP_SO_RECVSEQ: - if ((val != 0) && (val != 1)) { - err = -EINVAL; - break; - } - session->recv_seq = val ? -1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set recv_seq=%d\n", session->name, - session->recv_seq); - break; - - case PPPOL2TP_SO_SENDSEQ: - if ((val != 0) && (val != 1)) { - err = -EINVAL; - break; - } - session->send_seq = val ? -1 : 0; - { - struct sock *ssk = session->sock; - struct pppox_sock *po = pppox_sk(ssk); - po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : - PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; - } - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set send_seq=%d\n", session->name, session->send_seq); - break; - - case PPPOL2TP_SO_LNSMODE: - if ((val != 0) && (val != 1)) { - err = -EINVAL; - break; - } - session->lns_mode = val ? -1 : 0; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set lns_mode=%d\n", session->name, - session->lns_mode); - break; - - case PPPOL2TP_SO_DEBUG: - session->debug = val; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set debug=%x\n", session->name, session->debug); - break; - - case PPPOL2TP_SO_REORDERTO: - session->reorder_timeout = msecs_to_jiffies(val); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: set reorder_timeout=%d\n", session->name, - session->reorder_timeout); - break; - - default: - err = -ENOPROTOOPT; - break; - } - - return err; -} - -/* Main setsockopt() entry point. - * Does API checks, then calls either the tunnel or session setsockopt - * handler, according to whether the PPPoL2TP socket is a for a regular - * session or the special tunnel type. - */ -static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct sock *sk = sock->sk; - struct pppol2tp_session *session = sk->sk_user_data; - struct pppol2tp_tunnel *tunnel; - int val; - int err; - - if (level != SOL_PPPOL2TP) - return udp_prot.setsockopt(sk, level, optname, optval, optlen); - - if (optlen < sizeof(int)) - return -EINVAL; - - if (get_user(val, (int __user *)optval)) - return -EFAULT; - - err = -ENOTCONN; - if (sk->sk_user_data == NULL) - goto end; - - /* Get session context from the socket */ - err = -EBADF; - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto end; - - /* Special case: if session_id == 0x0000, treat as operation on tunnel - */ - if ((session->tunnel_addr.s_session == 0) && - (session->tunnel_addr.d_session == 0)) { - err = -EBADF; - tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); - if (tunnel == NULL) - goto end_put_sess; - - err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); - sock_put(session->tunnel_sock); - } else - err = pppol2tp_session_setsockopt(sk, session, optname, val); - - err = 0; - -end_put_sess: - sock_put(sk); -end: - return err; -} - -/* Tunnel getsockopt helper. Called with sock locked. - */ -static int pppol2tp_tunnel_getsockopt(struct sock *sk, - struct pppol2tp_tunnel *tunnel, - int optname, int *val) -{ - int err = 0; - - switch (optname) { - case PPPOL2TP_SO_DEBUG: - *val = tunnel->debug; - PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%x\n", tunnel->name, tunnel->debug); - break; - - default: - err = -ENOPROTOOPT; - break; - } - - return err; -} - -/* Session getsockopt helper. Called with sock locked. 
- */ -static int pppol2tp_session_getsockopt(struct sock *sk, - struct pppol2tp_session *session, - int optname, int *val) -{ - int err = 0; - - switch (optname) { - case PPPOL2TP_SO_RECVSEQ: - *val = session->recv_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get recv_seq=%d\n", session->name, *val); - break; - - case PPPOL2TP_SO_SENDSEQ: - *val = session->send_seq; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get send_seq=%d\n", session->name, *val); - break; - - case PPPOL2TP_SO_LNSMODE: - *val = session->lns_mode; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get lns_mode=%d\n", session->name, *val); - break; - - case PPPOL2TP_SO_DEBUG: - *val = session->debug; - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get debug=%d\n", session->name, *val); - break; - - case PPPOL2TP_SO_REORDERTO: - *val = (int) jiffies_to_msecs(session->reorder_timeout); - PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, - "%s: get reorder_timeout=%d\n", session->name, *val); - break; - - default: - err = -ENOPROTOOPT; - } - - return err; -} - -/* Main getsockopt() entry point. - * Does API checks, then calls either the tunnel or session getsockopt - * handler, according to whether the PPPoX socket is a for a regular session - * or the special tunnel type. - */ -static int pppol2tp_getsockopt(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen) -{ - struct sock *sk = sock->sk; - struct pppol2tp_session *session = sk->sk_user_data; - struct pppol2tp_tunnel *tunnel; - int val, len; - int err; - - if (level != SOL_PPPOL2TP) - return udp_prot.getsockopt(sk, level, optname, optval, optlen); - - if (get_user(len, (int __user *) optlen)) - return -EFAULT; - - len = min_t(unsigned int, len, sizeof(int)); - - if (len < 0) - return -EINVAL; - - err = -ENOTCONN; - if (sk->sk_user_data == NULL) - goto end; - - /* Get the session context */ - err = -EBADF; - session = pppol2tp_sock_to_session(sk); - if (session == NULL) - goto end; - - /* Special case: if session_id == 0x0000, treat as operation on tunnel */ - if ((session->tunnel_addr.s_session == 0) && - (session->tunnel_addr.d_session == 0)) { - err = -EBADF; - tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); - if (tunnel == NULL) - goto end_put_sess; - - err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); - sock_put(session->tunnel_sock); - } else - err = pppol2tp_session_getsockopt(sk, session, optname, &val); - - err = -EFAULT; - if (put_user(len, (int __user *) optlen)) - goto end_put_sess; - - if (copy_to_user((void __user *) optval, &val, len)) - goto end_put_sess; - - err = 0; - -end_put_sess: - sock_put(sk); -end: - return err; -} - -/***************************************************************************** - * /proc filesystem for debug - *****************************************************************************/ - -#ifdef CONFIG_PROC_FS - -#include <linux/seq_file.h> - -struct pppol2tp_seq_data { - struct seq_net_private p; - struct pppol2tp_tunnel *tunnel; /* current tunnel */ - struct pppol2tp_session *session; /* NULL means get first session in tunnel */ -}; - -static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr) -{ - struct pppol2tp_session *session = NULL; - struct hlist_node *walk; - int found = 0; - int next = 0; - int i; - - read_lock_bh(&tunnel->hlist_lock); - for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) { - hlist_for_each_entry(session, walk, 
&tunnel->session_hlist[i], hlist) { - if (curr == NULL) { - found = 1; - goto out; - } - if (session == curr) { - next = 1; - continue; - } - if (next) { - found = 1; - goto out; - } - } - } -out: - read_unlock_bh(&tunnel->hlist_lock); - if (!found) - session = NULL; - - return session; -} - -static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn, - struct pppol2tp_tunnel *curr) -{ - struct pppol2tp_tunnel *tunnel = NULL; - - read_lock_bh(&pn->pppol2tp_tunnel_list_lock); - if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) { - goto out; - } - tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); -out: - read_unlock_bh(&pn->pppol2tp_tunnel_list_lock); - - return tunnel; -} - -static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) -{ - struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; - struct pppol2tp_net *pn; - loff_t pos = *offs; - - if (!pos) - goto out; - - BUG_ON(m->private == NULL); - pd = m->private; - pn = pppol2tp_pernet(seq_file_net(m)); - - if (pd->tunnel == NULL) { - if (!list_empty(&pn->pppol2tp_tunnel_list)) - pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list); - } else { - pd->session = next_session(pd->tunnel, pd->session); - if (pd->session == NULL) { - pd->tunnel = next_tunnel(pn, pd->tunnel); - } - } - - /* NULL tunnel and session indicates end of list */ - if ((pd->tunnel == NULL) && (pd->session == NULL)) - pd = NULL; - -out: - return pd; -} - -static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) -{ - (*pos)++; - return NULL; -} - -static void pppol2tp_seq_stop(struct seq_file *p, void *v) -{ - /* nothing to do */ -} - -static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) -{ - struct pppol2tp_tunnel *tunnel = v; - - seq_printf(m, "\nTUNNEL '%s', %c %d\n", - tunnel->name, - (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N', - atomic_read(&tunnel->ref_count) - 1); - seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", - tunnel->debug, - (unsigned long long)tunnel->stats.tx_packets, - (unsigned long long)tunnel->stats.tx_bytes, - (unsigned long long)tunnel->stats.tx_errors, - (unsigned long long)tunnel->stats.rx_packets, - (unsigned long long)tunnel->stats.rx_bytes, - (unsigned long long)tunnel->stats.rx_errors); -} - -static void pppol2tp_seq_session_show(struct seq_file *m, void *v) -{ - struct pppol2tp_session *session = v; - - seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " - "%04X/%04X %d %c\n", - session->name, - ntohl(session->tunnel_addr.addr.sin_addr.s_addr), - ntohs(session->tunnel_addr.addr.sin_port), - session->tunnel_addr.s_tunnel, - session->tunnel_addr.s_session, - session->tunnel_addr.d_tunnel, - session->tunnel_addr.d_session, - session->sock->sk_state, - (session == session->sock->sk_user_data) ? - 'Y' : 'N'); - seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", - session->mtu, session->mru, - session->recv_seq ? 'R' : '-', - session->send_seq ? 'S' : '-', - session->lns_mode ? 
"LNS" : "LAC", - session->debug, - jiffies_to_msecs(session->reorder_timeout)); - seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", - session->nr, session->ns, - (unsigned long long)session->stats.tx_packets, - (unsigned long long)session->stats.tx_bytes, - (unsigned long long)session->stats.tx_errors, - (unsigned long long)session->stats.rx_packets, - (unsigned long long)session->stats.rx_bytes, - (unsigned long long)session->stats.rx_errors); -} - -static int pppol2tp_seq_show(struct seq_file *m, void *v) -{ - struct pppol2tp_seq_data *pd = v; - - /* display header on line 1 */ - if (v == SEQ_START_TOKEN) { - seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); - seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); - seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); - seq_puts(m, " SESSION name, addr/port src-tid/sid " - "dest-tid/sid state user-data-ok\n"); - seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); - seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); - goto out; - } - - /* Show the tunnel or session context. - */ - if (pd->session == NULL) - pppol2tp_seq_tunnel_show(m, pd->tunnel); - else - pppol2tp_seq_session_show(m, pd->session); - -out: - return 0; -} - -static const struct seq_operations pppol2tp_seq_ops = { - .start = pppol2tp_seq_start, - .next = pppol2tp_seq_next, - .stop = pppol2tp_seq_stop, - .show = pppol2tp_seq_show, -}; - -/* Called when our /proc file is opened. We allocate data for use when - * iterating our tunnel / session contexts and store it in the private - * data of the seq_file. - */ -static int pppol2tp_proc_open(struct inode *inode, struct file *file) -{ - return seq_open_net(inode, file, &pppol2tp_seq_ops, - sizeof(struct pppol2tp_seq_data)); -} - -static const struct file_operations pppol2tp_proc_fops = { - .owner = THIS_MODULE, - .open = pppol2tp_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_net, -}; - -#endif /* CONFIG_PROC_FS */ - -/***************************************************************************** - * Init and cleanup - *****************************************************************************/ - -static const struct proto_ops pppol2tp_ops = { - .family = AF_PPPOX, - .owner = THIS_MODULE, - .release = pppol2tp_release, - .bind = sock_no_bind, - .connect = pppol2tp_connect, - .socketpair = sock_no_socketpair, - .accept = sock_no_accept, - .getname = pppol2tp_getname, - .poll = datagram_poll, - .listen = sock_no_listen, - .shutdown = sock_no_shutdown, - .setsockopt = pppol2tp_setsockopt, - .getsockopt = pppol2tp_getsockopt, - .sendmsg = pppol2tp_sendmsg, - .recvmsg = pppol2tp_recvmsg, - .mmap = sock_no_mmap, - .ioctl = pppox_ioctl, -}; - -static struct pppox_proto pppol2tp_proto = { - .create = pppol2tp_create, - .ioctl = pppol2tp_ioctl -}; - -static __net_init int pppol2tp_init_net(struct net *net) -{ - struct pppol2tp_net *pn = pppol2tp_pernet(net); - struct proc_dir_entry *pde; - - INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list); - rwlock_init(&pn->pppol2tp_tunnel_list_lock); - - pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); -#ifdef CONFIG_PROC_FS - if (!pde) - return -ENOMEM; -#endif - - return 0; -} - -static __net_exit void pppol2tp_exit_net(struct net *net) -{ - proc_net_remove(net, "pppol2tp"); -} - -static struct pernet_operations pppol2tp_net_ops = { - .init = pppol2tp_init_net, - .exit = pppol2tp_exit_net, - .id = &pppol2tp_net_id, - .size = sizeof(struct pppol2tp_net), -}; - -static int __init 
pppol2tp_init(void) -{ - int err; - - err = proto_register(&pppol2tp_sk_proto, 0); - if (err) - goto out; - err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto); - if (err) - goto out_unregister_pppol2tp_proto; - - err = register_pernet_device(&pppol2tp_net_ops); - if (err) - goto out_unregister_pppox_proto; - - printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", - PPPOL2TP_DRV_VERSION); - -out: - return err; -out_unregister_pppox_proto: - unregister_pppox_proto(PX_PROTO_OL2TP); -out_unregister_pppol2tp_proto: - proto_unregister(&pppol2tp_sk_proto); - goto out; -} - -static void __exit pppol2tp_exit(void) -{ - unregister_pppox_proto(PX_PROTO_OL2TP); - unregister_pernet_device(&pppol2tp_net_ops); - proto_unregister(&pppol2tp_sk_proto); -} - -module_init(pppol2tp_init); -module_exit(pppol2tp_exit); - -MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, " - "James Chapman <jchapman@katalix.com>"); -MODULE_DESCRIPTION("PPP over L2TP over UDP"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(PPPOL2TP_DRV_VERSION); diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index ac806b27c65..d4191ef9cad 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c @@ -22,7 +22,6 @@ #include <linux/string.h> #include <linux/module.h> #include <linux/kernel.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/net.h> diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index a849f6f23a1..022317db467 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -30,6 +30,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -326,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, unsigned int bufsize; if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) - dev_info(ctodev(card), "%s: ERROR status \n", __func__); + dev_info(ctodev(card), "%s: ERROR status\n", __func__); /* we need to round up the buffer size to a multiple of 128 */ bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); @@ -546,7 +547,7 @@ out: void gelic_net_set_multi(struct net_device *netdev) { struct gelic_card *card = netdev_card(netdev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; unsigned int i; uint8_t *p; u64 addr; @@ -580,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev) } /* set multicast addresses */ - netdev_for_each_mc_addr(mc, netdev) { + netdev_for_each_mc_addr(ha, netdev) { addr = 0; - p = mc->dmi_addr; + p = ha->addr; for (i = 0; i < ETH_ALEN; i++) { addr <<= 8; addr |= *p++; @@ -1434,7 +1435,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work) container_of(work, struct gelic_card, tx_timeout_task); struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0]; - dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__); + dev_info(ctodev(card), "%s:Timed out. 
Restarting...\n", __func__); if (!(netdev->flags & IFF_UP)) goto out; diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index f5fc0f78fc2..d4ff627c6f7 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -527,7 +528,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len, u8 item_len; u8 item_id; - pr_debug("%s: data=%p len=%ld \n", __func__, + pr_debug("%s: data=%p len=%ld\n", __func__, data, len); memset(ie_info, 0, sizeof(struct ie_info)); @@ -978,7 +979,7 @@ static int gelic_wl_set_essid(struct net_device *netdev, pr_debug("%s: essid = '%s'\n", __func__, extra); set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); } else { - pr_debug("%s: ESSID any \n", __func__); + pr_debug("%s: ESSID any\n", __func__); clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); } set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); @@ -986,7 +987,7 @@ static int gelic_wl_set_essid(struct net_device *netdev, gelic_wl_try_associate(netdev); /* FIXME */ - pr_debug("%s: -> \n", __func__); + pr_debug("%s: ->\n", __func__); return 0; } @@ -997,7 +998,7 @@ static int gelic_wl_get_essid(struct net_device *netdev, struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); unsigned long irqflag; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); mutex_lock(&wl->assoc_stat_lock); spin_lock_irqsave(&wl->lock, irqflag); if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) || @@ -1010,7 +1011,7 @@ static int gelic_wl_get_essid(struct net_device *netdev, mutex_unlock(&wl->assoc_stat_lock); spin_unlock_irqrestore(&wl->lock, irqflag); - pr_debug("%s: -> len=%d \n", __func__, data->essid.length); + pr_debug("%s: -> len=%d\n", __func__, data->essid.length); return 0; } @@ -1027,7 +1028,7 @@ static int gelic_wl_set_encode(struct net_device *netdev, int key_index, index_specified; int ret = 0; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); flags = enc->flags & IW_ENCODE_FLAGS; key_index = enc->flags & IW_ENCODE_INDEX; @@ -1086,7 +1087,7 @@ static int gelic_wl_set_encode(struct net_device *netdev, set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); done: spin_unlock_irqrestore(&wl->lock, irqflag); - pr_debug("%s: -> \n", __func__); + pr_debug("%s: ->\n", __func__); return ret; } @@ -1100,7 +1101,7 @@ static int gelic_wl_get_encode(struct net_device *netdev, unsigned int key_index, index_specified; int ret = 0; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); key_index = enc->flags & IW_ENCODE_INDEX; pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__, enc->flags, enc->pointer, enc->length, extra); @@ -1214,7 +1215,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev, int key_index; int ret = 0; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); flags = enc->flags & IW_ENCODE_FLAGS; alg = ext->alg; key_index = enc->flags & IW_ENCODE_INDEX; @@ -1287,7 +1288,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev, } done: spin_unlock_irqrestore(&wl->lock, irqflag); - pr_debug("%s: -> \n", __func__); + pr_debug("%s: ->\n", __func__); return ret; } @@ -1303,7 +1304,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev, int ret = 0; int max_key_len; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); max_key_len = enc->length - sizeof(struct iw_encode_ext); if (max_key_len < 0) @@ -1358,7 +1359,7 @@ static int 
gelic_wl_get_encodeext(struct net_device *netdev, } out: spin_unlock_irqrestore(&wl->lock, irqflag); - pr_debug("%s: -> \n", __func__); + pr_debug("%s: ->\n", __func__); return ret; } /* SIOC{S,G}IWMODE */ @@ -1369,7 +1370,7 @@ static int gelic_wl_set_mode(struct net_device *netdev, __u32 mode = data->mode; int ret; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); if (mode == IW_MODE_INFRA) ret = 0; else @@ -1383,7 +1384,7 @@ static int gelic_wl_get_mode(struct net_device *netdev, union iwreq_data *data, char *extra) { __u32 *mode = &data->mode; - pr_debug("%s: <- \n", __func__); + pr_debug("%s: <-\n", __func__); *mode = IW_MODE_INFRA; pr_debug("%s: ->\n", __func__); return 0; @@ -2021,7 +2022,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl, if (!rc) { /* timeouted. Maybe key or cyrpt mode is wrong */ - pr_info("%s: connect timeout \n", __func__); + pr_info("%s: connect timeout\n", __func__); cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0); kfree(cmd); @@ -2062,7 +2063,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl, } if (desired_event == event) { - pr_debug("%s: completed \n", __func__); + pr_debug("%s: completed\n", __func__); complete(&wl->assoc_done); netif_carrier_on(port_to_netdev(wl_port(wl))); } else @@ -2316,7 +2317,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card) pr_debug("%s:start\n", __func__); netdev = alloc_etherdev(sizeof(struct gelic_port) + sizeof(struct gelic_wl_info)); - pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card); + pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card); if (!netdev) return NULL; diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 4ef0afbcbe1..01a6ca303a1 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -343,8 +343,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); - pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); - pci_unmap_len_set(lrg_buf_cb, maplen, + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); } @@ -1924,8 +1924,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); - pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); - pci_unmap_len_set(lrg_buf_cb, maplen, + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); --qdev->lrg_buf_skb_check; @@ -2041,16 +2041,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, } pci_unmap_single(qdev->pdev, - pci_unmap_addr(&tx_cb->map[0], mapaddr), - pci_unmap_len(&tx_cb->map[0], maplen), + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); tx_cb->seg_count--; if (tx_cb->seg_count) { for (i = 1; i < tx_cb->seg_count; i++) { pci_unmap_page(qdev->pdev, - pci_unmap_addr(&tx_cb->map[i], + dma_unmap_addr(&tx_cb->map[i], mapaddr), - pci_unmap_len(&tx_cb->map[i], maplen), + dma_unmap_len(&tx_cb->map[i], maplen), PCI_DMA_TODEVICE); } } @@ -2119,8 +2119,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, skb_put(skb, length); pci_unmap_single(qdev->pdev, - pci_unmap_addr(lrg_buf_cb2, mapaddr), - pci_unmap_len(lrg_buf_cb2, maplen), + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), PCI_DMA_FROMDEVICE); 
prefetch(skb->data); skb->ip_summed = CHECKSUM_NONE; @@ -2165,8 +2165,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, skb_put(skb2, length); /* Just the second buffer length here. */ pci_unmap_single(qdev->pdev, - pci_unmap_addr(lrg_buf_cb2, mapaddr), - pci_unmap_len(lrg_buf_cb2, maplen), + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), PCI_DMA_FROMDEVICE); prefetch(skb2->data); @@ -2258,7 +2258,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, "%x.\n", ndev->name, net_rsp->opcode); printk(KERN_ERR PFX - "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n", + "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", (unsigned long int)tmp[0], (unsigned long int)tmp[1], (unsigned long int)tmp[2], @@ -2454,8 +2454,8 @@ static int ql_send_map(struct ql3_adapter *qdev, oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(len); - pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - pci_unmap_len_set(&tx_cb->map[seg], maplen, len); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, len); seg++; if (seg_cnt == 1) { @@ -2488,9 +2488,9 @@ static int ql_send_map(struct ql3_adapter *qdev, oal_entry->len = cpu_to_le32(sizeof(struct oal) | OAL_CONT_ENTRY); - pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - pci_unmap_len_set(&tx_cb->map[seg], maplen, + dma_unmap_len_set(&tx_cb->map[seg], maplen, sizeof(struct oal)); oal_entry = (struct oal_entry *)oal; oal++; @@ -2512,8 +2512,8 @@ static int ql_send_map(struct ql3_adapter *qdev, oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(frag->size); - pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); - pci_unmap_len_set(&tx_cb->map[seg], maplen, + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); } /* Terminate the last segment. */ @@ -2539,22 +2539,22 @@ map_error: (seg == 12 && seg_cnt > 13) || /* but necessary. 
*/ (seg == 17 && seg_cnt > 18)) { pci_unmap_single(qdev->pdev, - pci_unmap_addr(&tx_cb->map[seg], mapaddr), - pci_unmap_len(&tx_cb->map[seg], maplen), + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); oal++; seg++; } pci_unmap_page(qdev->pdev, - pci_unmap_addr(&tx_cb->map[seg], mapaddr), - pci_unmap_len(&tx_cb->map[seg], maplen), + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), PCI_DMA_TODEVICE); } pci_unmap_single(qdev->pdev, - pci_unmap_addr(&tx_cb->map[0], mapaddr), - pci_unmap_addr(&tx_cb->map[0], maplen), + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_addr(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); return NETDEV_TX_BUSY; @@ -2841,8 +2841,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev) if (lrg_buf_cb->skb) { dev_kfree_skb(lrg_buf_cb->skb); pci_unmap_single(qdev->pdev, - pci_unmap_addr(lrg_buf_cb, mapaddr), - pci_unmap_len(lrg_buf_cb, maplen), + dma_unmap_addr(lrg_buf_cb, mapaddr), + dma_unmap_len(lrg_buf_cb, maplen), PCI_DMA_FROMDEVICE); memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); } else { @@ -2912,8 +2912,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) return -ENOMEM; } - pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); - pci_unmap_len_set(lrg_buf_cb, maplen, + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); lrg_buf_cb->buf_phy_addr_low = @@ -3793,13 +3793,13 @@ static void ql_reset_work(struct work_struct *work) "%s: Freeing lost SKB.\n", qdev->ndev->name); pci_unmap_single(qdev->pdev, - pci_unmap_addr(&tx_cb->map[0], mapaddr), - pci_unmap_len(&tx_cb->map[0], maplen), + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), PCI_DMA_TODEVICE); for(j=1;j<tx_cb->seg_count;j++) { pci_unmap_page(qdev->pdev, - pci_unmap_addr(&tx_cb->map[j],mapaddr), - pci_unmap_len(&tx_cb->map[j],maplen), + dma_unmap_addr(&tx_cb->map[j],mapaddr), + dma_unmap_len(&tx_cb->map[j],maplen), PCI_DMA_TODEVICE); } dev_kfree_skb(tx_cb->skb); diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 7113e71b15a..3362a661248 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h @@ -998,8 +998,8 @@ enum link_state_t { struct ql_rcv_buf_cb { struct ql_rcv_buf_cb *next; struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_LEN(maplen); + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); __le32 buf_phy_addr_low; __le32 buf_phy_addr_high; int index; @@ -1029,8 +1029,8 @@ struct oal { }; struct map_list { - DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_LEN(maplen); + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); }; struct ql_tx_buf_cb { diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 0da94b208db..28c148cbe37 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -51,8 +51,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 0 -#define QLCNIC_LINUX_VERSIONID "5.0.0" +#define _QLCNIC_LINUX_SUBVERSION 1 +#define QLCNIC_LINUX_VERSIONID "5.0.1" #define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) @@ -958,8 +958,10 @@ struct qlcnic_adapter { u8 dev_state; u8 diag_test; u8 diag_cnt; + u8 reset_ack_timeo; + u8 dev_init_timeo; u8 rsrd1; - u16 rsrd2; + u16 msg_enable; u8 mac_addr[ETH_ALEN]; @@ -994,6 +996,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter 
*adapter, ulong off); int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); +void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *); +void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64); + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) #define QLCRD32(adapter, off) \ (qlcnic_hw_read_wx_2M(adapter, off)) @@ -1035,6 +1042,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); void qlcnic_request_firmware(struct qlcnic_adapter *adapter); void qlcnic_release_firmware(struct qlcnic_adapter *adapter); int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); +void qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, @@ -1128,4 +1136,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) extern const struct ethtool_ops qlcnic_ethtool_ops; +#define QLCDB(adapter, lvl, _fmt, _args...) do { \ + if (NETIF_MSG_##lvl & adapter->msg_enable) \ + printk(KERN_INFO "%s: %s: " _fmt, \ + dev_name(&adapter->pdev->dev), \ + __func__, ##_args); \ + } while (0) + #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index f83e15fe3e1..08d6f105371 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c @@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) return 0; } +static u32 qlcnic_get_msglevel(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglvl; +} + const struct ethtool_ops qlcnic_ethtool_ops = { .get_settings = qlcnic_get_settings, .set_settings = qlcnic_set_settings, @@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = { .get_flags = ethtool_op_get_flags, .set_flags = qlcnic_set_flags, .phys_id = qlcnic_blink_led, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, }; diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h index 0469f84360a..51fa3fbcf58 100644 --- a/drivers/net/qlcnic/qlcnic_hdr.h +++ b/drivers/net/qlcnic/qlcnic_hdr.h @@ -435,9 +435,10 @@ enum { #define QLCNIC_PCI_MS_2M (0x80000) #define QLCNIC_PCI_OCM0_2M (0x000c0000UL) #define QLCNIC_PCI_CRBSPACE (0x06000000UL) +#define QLCNIC_PCI_CAMQM (0x04800000UL) +#define QLCNIC_PCI_CAMQM_END (0x04800800UL) #define QLCNIC_PCI_2MB_SIZE (0x00200000UL) #define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) -#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL) #define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) @@ -448,7 +449,7 @@ enum { #define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) #define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) #define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) -#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) +#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) /* * Register offsets for MN @@ -694,6 +695,8 @@ enum { #define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) #define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) #define QLCNIC_CRB_DRV_IDC_VER 
(QLCNIC_CAM_RAM(0x14c)) +#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) +#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) /* Device State */ #define QLCNIC_DEV_COLD 1 diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index da00e162b6d..7a72b8d06bc 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c @@ -24,6 +24,7 @@ #include "qlcnic.h" +#include <linux/slab.h> #include <net/ip.h> #define MASK(n) ((1ULL<<(n))-1) @@ -53,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr) } #endif -#define ADDR_IN_RANGE(addr, low, high) \ - (((addr) < (high)) && ((addr) >= (low))) - -#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ - ((adapter)->ahw.pci_base0 + (off)) - -static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter, - unsigned long off) -{ - if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) - return PCI_OFFSET_FIRST_RANGE(adapter, off); - - return NULL; -} - static const struct crb_128M_2M_block_map crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ @@ -309,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); if (done == 1) break; - if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) + if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock;reg_id=%d\n", + sem, id_reg); return -EIO; + } msleep(1); } @@ -426,10 +416,13 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr) void qlcnic_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + qlcnic_nic_add_mac(adapter, adapter->mac_addr); qlcnic_nic_add_mac(adapter, bcast_addr); @@ -445,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev) } if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(mc_ptr, netdev) { - qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + qlcnic_nic_add_mac(adapter, ha->addr); } } @@ -874,13 +867,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter, u64 addr, u32 *start) { u32 window; - struct pci_dev *pdev = adapter->pdev; - - if ((addr & 0x00ff800) == 0xff800) { - if (printk_ratelimit()) - dev_warn(&pdev->dev, "QM access not handled\n"); - return -EIO; - } window = OCM_WIN_P3P(addr); @@ -897,8 +883,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, u64 *data, int op) { - void __iomem *addr, *mem_ptr = NULL; - resource_size_t mem_base; + void __iomem *addr; int ret; u32 start; @@ -908,21 +893,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, if (ret != 0) goto unlock; - addr = pci_base_offset(adapter, start); - if (addr) - goto noremap; - - mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK); + addr = adapter->ahw.pci_base0 + start; - mem_ptr = ioremap(mem_base, PAGE_SIZE); - if (mem_ptr == NULL) { - ret = -EIO; - goto unlock; - } - - addr = mem_ptr + (start & (PAGE_SIZE - 1)); - -noremap: if (op == 0) /* read */ *data = readq(addr); else /* write */ @@ -931,11 +903,31 @@ noremap: unlock: mutex_unlock(&adapter->ahw.mem_lock); - if (mem_ptr) - iounmap(mem_ptr); return ret; } +void +qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) 
+{ + void __iomem *addr = adapter->ahw.pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw.mem_lock); + *data = readq(addr); + mutex_unlock(&adapter->ahw.mem_lock); +} + +void +qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw.mem_lock); + writeq(data, addr); + mutex_unlock(&adapter->ahw.mem_lock); +} + #define MAX_CTL_CHECK 1000 int @@ -944,7 +936,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, { int i, j, ret; u32 temp, off8; - u64 stride; void __iomem *mem_crb; /* Only 64-bit aligned access */ @@ -953,7 +944,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX_P3)) { + QLCNIC_ADDR_QDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; @@ -971,9 +962,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, return -EIO; correct: - stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; - - off8 = off & ~(stride-1); + off8 = off & ~0xf; mutex_lock(&adapter->ahw.mem_lock); @@ -981,30 +970,28 @@ correct: writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); i = 0; - if (stride == 16) { - writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); - writel((TA_CTL_START | TA_CTL_ENABLE), - (mem_crb + TEST_AGT_CTRL)); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = readl(mem_crb + TEST_AGT_CTRL); - if ((temp & TA_CTL_BUSY) == 0) - break; - } + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), + (mem_crb + TEST_AGT_CTRL)); - if (j >= MAX_CTL_CHECK) { - ret = -EIO; - goto done; - } + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } - i = (off & 0xf) ? 0 : 2; - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), - mem_crb + MIU_TEST_AGT_WRDATA(i)); - writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), - mem_crb + MIU_TEST_AGT_WRDATA(i+1)); - i = (off & 0xf) ? 2 : 0; + if (j >= MAX_CTL_CHECK) { + ret = -EIO; + goto done; } + i = (off & 0xf) ? 0 : 2; + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), + mem_crb + MIU_TEST_AGT_WRDATA(i)); + writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), + mem_crb + MIU_TEST_AGT_WRDATA(i+1)); + i = (off & 0xf) ? 2 : 0; + writel(data & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA(i)); writel((data >> 32) & 0xffffffff, @@ -1040,7 +1027,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, { int j, ret; u32 temp, off8; - u64 val, stride; + u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ @@ -1049,7 +1036,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, - QLCNIC_ADDR_QDR_NET_MAX_P3)) { + QLCNIC_ADDR_QDR_NET_MAX)) { mem_crb = qlcnic_get_ioaddr(adapter, QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; @@ -1069,9 +1056,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, return -EIO; correct: - stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 
16 : 8; - - off8 = off & ~(stride-1); + off8 = off & ~0xf; mutex_lock(&adapter->ahw.mem_lock); @@ -1093,7 +1078,7 @@ correct: ret = -EIO; } else { off8 = MIU_TEST_AGT_RDDATA_LO; - if ((stride == 16) && (off & 0xf)) + if (off & 0xf) off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; temp = readl(mem_crb + off8 + 4); diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 7c34e4e29b3..01ce74ee99f 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -24,6 +24,7 @@ #include <linux/netdevice.h> #include <linux/delay.h> +#include <linux/slab.h> #include "qlcnic.h" struct crb_addr_pair { @@ -529,6 +530,22 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) return 0; } +void +qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { + + int timeo; + + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) + timeo = 30; + + adapter->dev_init_timeo = timeo; + + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) + timeo = 10; + + adapter->reset_ack_timeo = timeo; +} + static int qlcnic_has_mn(struct qlcnic_adapter *adapter) { @@ -611,7 +628,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size * (idx + 1))); + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; @@ -620,7 +637,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter) (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; - data_size = descr->findex + cpu_to_le32(descr->size); + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; @@ -646,7 +663,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + - (cpu_to_le32(tab_desc->entry_size * (idx + 1))); + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; @@ -654,7 +671,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter) offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; - data_size = descr->findex + cpu_to_le32(descr->size); + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; @@ -949,6 +966,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter) flashaddr += 8; } + + size = (__force u32)qlcnic_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + } + } else { u64 data; u32 hi, lo; diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index fc721564e69..e4fd5dcdfb4 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -22,6 +22,7 @@ * */ +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> @@ -649,7 +650,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter) if (err) return err; - if (!qlcnic_can_start_firmware(adapter)) + err = qlcnic_can_start_firmware(adapter); + if (err < 0) + return err; + else if (!err) goto wait_init; first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc)); @@ -949,11 +953,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) adapter->max_sds_rings = max_sds_rings; if (qlcnic_attach(adapter)) - return; + goto out; if (netif_running(netdev)) 
__qlcnic_up(adapter, netdev); - +out: netif_device_attach(netdev); } @@ -975,8 +979,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) adapter->diag_test = test; ret = qlcnic_attach(adapter); - if (ret) + if (ret) { + netif_device_attach(netdev); return ret; + } if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->max_sds_rings; ring++) { @@ -1009,16 +1015,12 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter) if (netif_running(netdev)) { err = qlcnic_attach(adapter); if (!err) - err = __qlcnic_up(adapter, netdev); - - if (err) - goto done; + __qlcnic_up(adapter, netdev); } netif_device_attach(netdev); } -done: clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; } @@ -1138,6 +1140,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } + qlcnic_setup_idc_param(adapter); err = qlcnic_start_firmware(adapter); if (err) @@ -1333,6 +1336,7 @@ err_out_detach: qlcnic_detach(adapter); err_out: qlcnic_clr_all_drv_state(adapter); + netif_device_attach(netdev); return err; } #endif @@ -1738,6 +1742,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work) request_reset: adapter->need_fw_reset = 1; clear_bit(__QLCNIC_RESETTING, &adapter->state); + QLCDB(adapter, DRV, "Resetting adapter\n"); } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) @@ -2027,7 +2032,7 @@ static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) { u32 val, prev_state; - int cnt = 0; + u8 dev_init_timeo = adapter->dev_init_timeo; int portnum = adapter->portnum; if (qlcnic_api_lock(adapter)) @@ -2042,6 +2047,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) } prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Device state = %u\n", prev_state); switch (prev_state) { case QLCNIC_DEV_COLD: @@ -2072,13 +2078,17 @@ start_fw: } qlcnic_api_unlock(adapter); - msleep(1000); - while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) && - ++cnt < 20) + + do { msleep(1000); + } while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) + && --dev_init_timeo); - if (cnt >= 20) + if (!dev_init_timeo) { + dev_err(&adapter->pdev->dev, + "Waiting for device to initialize timeout\n"); return -1; + } if (qlcnic_api_lock(adapter)) return -1; @@ -2099,17 +2109,16 @@ qlcnic_fwinit_work(struct work_struct *work) struct qlcnic_adapter, fw_work.work); int dev_state; - if (++adapter->fw_wait_cnt > FW_POLL_THRESH) - goto err_ret; - if (test_bit(__QLCNIC_START_FW, &adapter->state)) { - if (qlcnic_check_drv_state(adapter)) { + if (qlcnic_check_drv_state(adapter) && + (adapter->fw_wait_cnt++ < adapter->reset_ack_timeo)) { qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); return; } + QLCDB(adapter, DRV, "Resetting FW\n"); if (!qlcnic_start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); return; @@ -2118,7 +2127,15 @@ qlcnic_fwinit_work(struct work_struct *work) goto err_ret; } + if (adapter->fw_wait_cnt++ > (adapter->dev_init_timeo / 2)) { + dev_err(&adapter->pdev->dev, + "Waiting for device to reset timeout\n"); + goto err_ret; + } + dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Func waiting: Device state=%d\n", dev_state); + switch (dev_state) { case QLCNIC_DEV_READY: if (!qlcnic_start_firmware(adapter)) { @@ -2135,6 +2152,7 @@ qlcnic_fwinit_work(struct work_struct *work) } err_ret: + netif_device_attach(adapter->netdev); qlcnic_clr_all_drv_state(adapter); } @@ -2171,6 +2189,9 @@ 
qlcnic_detach_work(struct work_struct *work) return; err_ret: + dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", + status, adapter->temp); + netif_device_attach(netdev); qlcnic_clr_all_drv_state(adapter); } @@ -2188,6 +2209,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) { QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); set_bit(__QLCNIC_START_FW, &adapter->state); + QLCDB(adapter, DRV, "NEED_RESET state set\n"); } qlcnic_api_unlock(adapter); @@ -2232,9 +2254,8 @@ qlcnic_attach_work(struct work_struct *work) qlcnic_config_indev_addr(netdev, NETDEV_UP); } - netif_device_attach(netdev); - done: + netif_device_attach(netdev); adapter->fw_fail_cnt = 0; clear_bit(__QLCNIC_RESETTING, &adapter->state); @@ -2284,8 +2305,11 @@ detach: QLCNIC_DEV_NEED_RESET; if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && - !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { + qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); + QLCDB(adapter, DRV, "fw recovery scheduled.\n"); + } return 1; } @@ -2386,14 +2410,21 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, loff_t offset, size_t size) { + size_t crb_size = 4; + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) return -EIO; - if ((size != 4) || (offset & 0x3)) - return -EINVAL; + if (offset < QLCNIC_PCI_CRBSPACE) { + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, + QLCNIC_PCI_CAMQM_END)) + crb_size = 8; + else + return -EINVAL; + } - if (offset < QLCNIC_PCI_CRBSPACE) - return -EINVAL; + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; return 0; } @@ -2405,14 +2436,20 @@ qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u32 data; + u64 qmdata; int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; - data = QLCRD32(adapter, offset); - memcpy(buf, &data, size); + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = QLCRD32(adapter, offset); + memcpy(buf, &data, size); + } return size; } @@ -2423,14 +2460,20 @@ qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u32 data; + u64 qmdata; int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; - memcpy(&data, buf, size); - QLCWR32(adapter, offset, data); + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + memcpy(&qmdata, buf, size); + qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + QLCWR32(adapter, offset, data); + } return size; } diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 8b742b639ce..20624ba44a3 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -1344,8 +1344,8 @@ struct oal { }; struct map_list { - DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_LEN(maplen); + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); }; struct tx_ring_desc { @@ -1373,8 +1373,8 @@ struct bq_desc { } p; __le64 *addr; u32 index; - DECLARE_PCI_UNMAP_ADDR(mapaddr); - DECLARE_PCI_UNMAP_LEN(maplen); + DEFINE_DMA_UNMAP_ADDR(mapaddr); + 
DEFINE_DMA_UNMAP_LEN(maplen); }; #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c index ff8550d2ca8..68a1c9b91e7 100644 --- a/drivers/net/qlge/qlge_dbg.c +++ b/drivers/net/qlge/qlge_dbg.c @@ -1,3 +1,5 @@ +#include <linux/slab.h> + #include "qlge.h" /* Read a NIC register from the alternate function. */ @@ -1338,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work) for (i = 0; i < count; i += 8) { printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " - "%.08x %.08x %.08x \n", i, + "%.08x %.08x %.08x\n", i, tmp[i + 0], tmp[i + 1], tmp[i + 2], @@ -2056,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); - printk(KERN_ERR PFX "flags3 = %s %s %s \n", + printk(KERN_ERR PFX "flags3 = %s %s %s\n", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c index 05b8bde9980..7e09ff4a575 100644 --- a/drivers/net/qlge/qlge_ethtool.c +++ b/drivers/net/qlge/qlge_ethtool.c @@ -7,7 +7,6 @@ #include <linux/dma-mapping.h> #include <linux/pagemap.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> @@ -405,7 +404,7 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) u32 wol = 0; status = ql_mb_wol_mode(qdev, wol); netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", - status == 0 ? "cleared sucessfully" : "clear failed", + status == 0 ? 
"cleared successfully" : "clear failed", wol); } diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c26ec5d740f..fa4b24c49f4 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); pci_dma_sync_single_for_cpu(qdev->pdev, - pci_unmap_addr(lbq_desc, mapaddr), + dma_unmap_addr(lbq_desc, mapaddr), rx_ring->lbq_buf_size, PCI_DMA_FROMDEVICE); @@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset; - pci_unmap_addr_set(lbq_desc, mapaddr, map); - pci_unmap_len_set(lbq_desc, maplen, + dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size); *lbq_desc->addr = cpu_to_le64(map); @@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) sbq_desc->p.skb = NULL; return; } - pci_unmap_addr_set(sbq_desc, mapaddr, map); - pci_unmap_len_set(sbq_desc, maplen, + dma_unmap_addr_set(sbq_desc, mapaddr, map); + dma_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size); *sbq_desc->addr = cpu_to_le64(map); } @@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev, "unmapping OAL area.\n"); } pci_unmap_single(qdev->pdev, - pci_unmap_addr(&tx_ring_desc->map[i], + dma_unmap_addr(&tx_ring_desc->map[i], mapaddr), - pci_unmap_len(&tx_ring_desc->map[i], + dma_unmap_len(&tx_ring_desc->map[i], maplen), PCI_DMA_TODEVICE); } else { netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, "unmapping frag %d.\n", i); pci_unmap_page(qdev->pdev, - pci_unmap_addr(&tx_ring_desc->map[i], + dma_unmap_addr(&tx_ring_desc->map[i], mapaddr), - pci_unmap_len(&tx_ring_desc->map[i], + dma_unmap_len(&tx_ring_desc->map[i], maplen), PCI_DMA_TODEVICE); } } @@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev, tbd->len = cpu_to_le32(len); tbd->addr = cpu_to_le64(map); - pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); map_idx++; /* @@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev, tbd->len = cpu_to_le32((sizeof(struct tx_buf_desc) * (frag_cnt - frag_idx)) | TX_DESC_C); - pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, sizeof(struct oal)); tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; map_idx++; @@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev, tbd->addr = cpu_to_le64(map); tbd->len = cpu_to_le32(frag->size); - pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, frag->size); } @@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, - pci_unmap_addr(sbq_desc, mapaddr), - pci_unmap_len(sbq_desc, maplen), + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); skb = sbq_desc->p.skb; 
ql_realign_skb(skb, hdr_len); @@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, */ sbq_desc = ql_get_curr_sbuf(rx_ring); pci_dma_sync_single_for_cpu(qdev->pdev, - pci_unmap_addr + dma_unmap_addr (sbq_desc, mapaddr), - pci_unmap_len + dma_unmap_len (sbq_desc, maplen), PCI_DMA_FROMDEVICE); memcpy(skb_put(skb, length), sbq_desc->p.skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, - pci_unmap_addr + dma_unmap_addr (sbq_desc, mapaddr), - pci_unmap_len + dma_unmap_len (sbq_desc, maplen), PCI_DMA_FROMDEVICE); @@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, ql_realign_skb(skb, length); skb_put(skb, length); pci_unmap_single(qdev->pdev, - pci_unmap_addr(sbq_desc, + dma_unmap_addr(sbq_desc, mapaddr), - pci_unmap_len(sbq_desc, + dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); sbq_desc->p.skb = NULL; @@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, return NULL; } pci_unmap_page(qdev->pdev, - pci_unmap_addr(lbq_desc, + dma_unmap_addr(lbq_desc, mapaddr), - pci_unmap_len(lbq_desc, maplen), + dma_unmap_len(lbq_desc, maplen), PCI_DMA_FROMDEVICE); skb_reserve(skb, NET_IP_ALIGN); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, @@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, int size, i = 0; sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, - pci_unmap_addr(sbq_desc, mapaddr), - pci_unmap_len(sbq_desc, maplen), + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { /* @@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring } if (sbq_desc->p.skb) { pci_unmap_single(qdev->pdev, - pci_unmap_addr(sbq_desc, mapaddr), - pci_unmap_len(sbq_desc, maplen), + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); dev_kfree_skb(sbq_desc->p.skb); sbq_desc->p.skb = NULL; @@ -3855,7 +3855,7 @@ int ql_wol(struct ql_adapter *qdev) status = ql_mb_wol_mode(qdev, wol); netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x) on %s\n", - (status == 0) ? "Sucessfully set" : "Failed", + (status == 0) ? 
"Successfully set" : "Failed", wol, qdev->ndev->name); } @@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device static void qlge_set_multicast_list(struct net_device *ndev) { struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; int i, status; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); @@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) if (status) goto exit; i = 0; - netdev_for_each_mc_addr(mc_ptr, ndev) { - if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, + netdev_for_each_mc_addr(ha, ndev) { + if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, MAC_ADDR_TYPE_MULTI_MAC, i)) { netif_err(qdev, hw, qdev->ndev, "Failed to loadmulticast address.\n"); diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 15d5373dc8f..41229164559 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -29,7 +29,6 @@ #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> @@ -135,7 +134,7 @@ #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ -#define MCAST_MAX 4 /* Max number multicast addresses to filter */ +#define MCAST_MAX 3 /* Max number multicast addresses to filter */ /* Descriptor status */ #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ @@ -331,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev) do { skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); if (!skb) { - printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name); + netdev_err(dev, "failed to alloc skb for rx\n"); rc = -ENOMEM; goto err_exit; } @@ -411,9 +410,9 @@ static void r6040_tx_timeout(struct net_device *dev) struct r6040_private *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; - printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x " + netdev_warn(dev, "transmit timed out, int enable %4.4x " "status %4.4x, PHY status %4.4x\n", - dev->name, ioread16(ioaddr + MIER), + ioread16(ioaddr + MIER), ioread16(ioaddr + MISR), r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); @@ -898,7 +897,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, if (!lp->tx_free_desc) { spin_unlock_irqrestore(&lp->lock, flags); netif_stop_queue(dev); - printk(KERN_ERR DRV_NAME ": no tx descriptor\n"); + netdev_err(dev, ": no tx descriptor\n"); return NETDEV_TX_BUSY; } @@ -938,7 +937,7 @@ static void r6040_multicast_list(struct net_device *dev) u16 *adrp; u16 reg; unsigned long flags; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; /* MAC Address */ @@ -973,8 +972,8 @@ static void r6040_multicast_list(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; @@ -983,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev) crc >>= 26; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } - /* Write the index of the hash table */ - for (i = 0; i < 4; i++) - iowrite16(hash_table[i] << 14, ioaddr + MCR1); /* Fill the MAC hash tables with their values */ iowrite16(hash_table[0], ioaddr + MAR0); iowrite16(hash_table[1], ioaddr + MAR1); @@ -994,16 +990,16 @@ static void r6040_multicast_list(struct 
net_device *dev) } /* Multicast Address 1~4 case */ i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i < MCAST_MAX) { - adrp = (u16 *) dmi->dmi_addr; + adrp = (u16 *) ha->addr; iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); } else { - iowrite16(0xffff, ioaddr + MID_0L + 8 * i); - iowrite16(0xffff, ioaddr + MID_0M + 8 * i); - iowrite16(0xffff, ioaddr + MID_0H + 8 * i); + iowrite16(0xffff, ioaddr + MID_1L + 8 * i); + iowrite16(0xffff, ioaddr + MID_1M + 8 * i); + iowrite16(0xffff, ioaddr + MID_1H + 8 * i); } i++; } @@ -1094,20 +1090,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, /* this should always be supported */ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" + dev_err(&pdev->dev, "32-bit PCI DMA addresses" "not supported by the card\n"); goto err_out; } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" + dev_err(&pdev->dev, "32-bit PCI DMA addresses" "not supported by the card\n"); goto err_out; } /* IO Size check */ if (pci_resource_len(pdev, bar) < io_size) { - printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n"); + dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); err = -EIO; goto err_out; } @@ -1116,7 +1112,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, dev = alloc_etherdev(sizeof(struct r6040_private)); if (!dev) { - printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n"); + dev_err(&pdev->dev, "Failed to allocate etherdev\n"); err = -ENOMEM; goto err_out; } @@ -1126,14 +1122,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, err = pci_request_regions(pdev, DRV_NAME); if (err) { - printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); + dev_err(&pdev->dev, "Failed to request PCI regions\n"); goto err_out_free_dev; } ioaddr = pci_iomap(pdev, bar, io_size); if (!ioaddr) { - printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n", - pci_name(pdev)); + dev_err(&pdev->dev, "ioremap failed for device\n"); err = -EIO; goto err_out_free_res; } @@ -1160,7 +1155,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, /* Some bootloader/BIOSes do not initialize * MAC address, warn about that */ if (!(adrp[0] || adrp[1] || adrp[2])) { - printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n"); + netdev_warn(dev, "MAC address not initialized, generating random\n"); random_ether_addr(dev->dev_addr); } @@ -1188,7 +1183,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, /* Check the vendor ID on the PHY, if 0xffff assume none attached */ if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) { - printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n"); + dev_err(&pdev->dev, "Failed to detect an attached PHY\n"); err = -ENODEV; goto err_out_unmap; } @@ -1196,7 +1191,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, /* Register net device. 
After this dev->name assign */ err = register_netdev(dev); if (err) { - printk(KERN_ERR DRV_NAME ": Failed to register net device\n"); + dev_err(&pdev->dev, "Failed to register net device\n"); goto err_out_unmap; } return 0; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 9d3ebf3e975..340da3915b9 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -23,6 +23,7 @@ #include <linux/tcp.h> #include <linux/init.h> #include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> #include <asm/system.h> #include <asm/io.h> @@ -186,8 +187,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -static int rx_copybreak = 200; -static int use_dac = -1; +/* + * we set our copybreak very high so that we don't have + * to allocate 16k frames all the time (see note in + * rtl8169_open() + */ +static int rx_copybreak = 16383; +static int use_dac; static struct { u32 msg_enable; } debug = { -1 }; @@ -504,6 +510,7 @@ struct rtl8169_private { struct mii_if_info mii; struct rtl8169_counters counters; + u32 saved_wolopts; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); @@ -511,8 +518,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); module_param(rx_copybreak, int, 0); MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); module_param(use_dac, int, 0); -MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only." -" Unsafe on 32 bit PCI slot."); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_LICENSE("GPL"); @@ -744,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev, spin_lock_irqsave(&tp->lock, flags); if (tp->link_ok(ioaddr)) { + /* This is to cancel a scheduled suspend if there's one. 
*/ + pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); + pm_schedule_suspend(&tp->pci_dev->dev, 100); } spin_unlock_irqrestore(&tp->lock, flags); } -static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u8 options; - - wol->wolopts = 0; - -#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) - wol->supported = WAKE_ANY; - - spin_lock_irq(&tp->lock); + u32 wolopts = 0; options = RTL_R8(Config1); if (!(options & PMEnable)) - goto out_unlock; + return 0; options = RTL_R8(Config3); if (options & LinkUp) - wol->wolopts |= WAKE_PHY; + wolopts |= WAKE_PHY; if (options & MagicPacket) - wol->wolopts |= WAKE_MAGIC; + wolopts |= WAKE_MAGIC; options = RTL_R8(Config5); if (options & UWF) - wol->wolopts |= WAKE_UCAST; + wolopts |= WAKE_UCAST; if (options & BWF) - wol->wolopts |= WAKE_BCAST; + wolopts |= WAKE_BCAST; if (options & MWF) - wol->wolopts |= WAKE_MCAST; + wolopts |= WAKE_MCAST; -out_unlock: - spin_unlock_irq(&tp->lock); + return wolopts; } -static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + spin_unlock_irq(&tp->lock); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ void __iomem *ioaddr = tp->mmio_addr; unsigned int i; static const struct { @@ -807,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { WAKE_ANY, Config5, LanWake } }; - spin_lock_irq(&tp->lock); - RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; - if (wol->wolopts & cfg[i].opt) + if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); if (wol->wolopts) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); spin_unlock_irq(&tp->lock); @@ -1038,14 +1058,14 @@ static void rtl8169_vlan_rx_register(struct net_device *dev, } static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, - struct sk_buff *skb) + struct sk_buff *skb, int polling) { u32 opts2 = le32_to_cpu(desc->opts2); struct vlan_group *vlgrp = tp->vlgrp; int ret; if (vlgrp && (opts2 & RxVlanTag)) { - vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff)); + __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling); ret = 0; } else ret = -1; @@ -1062,7 +1082,7 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, } static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, - struct sk_buff *skb) + struct sk_buff *skb, int polling) { return -1; } @@ -2821,8 +2841,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) spin_lock_irq(&tp->lock); 
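/*
 * Aside: rtl8169_rx_vlan_skb() above gains an "int polling" argument
 * because the receive path can be driven either from the NAPI poll loop
 * (softirq context) or from the reset task (process context), and
 * netif_receive_skb() is only valid in the former.  A minimal sketch of
 * the dispatch rule, with an invented helper name; the driver itself
 * open-codes this test in rtl8169_rx_interrupt() further down.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_rx_deliver(struct sk_buff *skb, int polling)
{
	if (polling)
		netif_receive_skb(skb);	/* NAPI poll path, softirq context */
	else
		netif_rx(skb);		/* process context: queue via backlog */
}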
RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W32(MAC0, low); RTL_W32(MAC4, high); + RTL_W32(MAC0, low); RTL_W8(Cfg9346, Cfg9346_Lock); spin_unlock_irq(&tp->lock); @@ -2974,7 +2994,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioaddr; unsigned int i; int rc; - int this_use_dac = use_dac; if (netif_msg_drv(&debug)) { printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", @@ -3040,17 +3059,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->cp_cmd = PCIMulRW | RxChkSum; - tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (!tp->pcie_cap) - netif_info(tp, probe, dev, "no PCI Express capability\n"); - - if (this_use_dac < 0) - this_use_dac = tp->pcie_cap != 0; - if ((sizeof(dma_addr_t) > 4) && - this_use_dac && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - netif_info(tp, probe, dev, "using 64-bit DMA\n"); + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { tp->cp_cmd |= PCIDAC; dev->features |= NETIF_F_HIGHDMA; } else { @@ -3069,6 +3079,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_res_4; } + tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (!tp->pcie_cap) + netif_info(tp, probe, dev, "no PCI Express capability\n"); + RTL_W16(IntrMask, 0x0000); /* Soft reset the chip. */ @@ -3189,6 +3203,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + if (pci_dev_run_wake(pdev)) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } + pm_runtime_idle(&pdev->dev); + out: return rc; @@ -3211,10 +3231,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); + pm_runtime_get_sync(&pdev->dev); + flush_scheduled_work(); unregister_netdev(dev); + if (pci_dev_run_wake(pdev)) { + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + } + pm_runtime_put_noidle(&pdev->dev); + /* restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); @@ -3224,9 +3252,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) } static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, - struct net_device *dev) + unsigned int mtu) { - unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + if (max_frame != 16383) + printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " + "NIC may lead to frame reception errors!\n"); tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; } @@ -3237,8 +3269,19 @@ static int rtl8169_open(struct net_device *dev) struct pci_dev *pdev = tp->pci_dev; int retval = -ENOMEM; + pm_runtime_get_sync(&pdev->dev); - rtl8169_set_rxbufsize(tp, dev); + /* + * Note that we use a magic value here, its wierd I know + * its done because, some subset of rtl8169 hardware suffers from + * a problem in which frames received that are longer than + * the size set in RxMaxSize register return garbage sizes + * when received. To avoid this we need to turn off filtering, + * which is done by setting a value of 16383 in the RxMaxSize register + * and allocating 16k frames to handle the largest possible rx value + * thats what the magic math below does. + */ + rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN); /* * Rx and Tx desscriptors needs 256 bytes alignment. 
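/*
 * Aside: the probe/remove hunks just above enable runtime power
 * management when pci_dev_run_wake() says the device can wake the
 * system, and the open/close and dev_pm_ops hunks below take runtime-PM
 * references and add runtime_suspend/resume/idle callbacks.  A minimal,
 * hedged sketch of that wiring for a PCI network driver follows; the
 * function names are invented and the bodies are placeholders, not the
 * r8169 implementation.
 */
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *d)
{
	return 0;	/* save state / arm wake sources here */
}

static int example_runtime_resume(struct device *d)
{
	return 0;	/* restore state and restart the NIC here */
}

static int example_runtime_idle(struct device *d)
{
	return -EBUSY;	/* veto idling unless e.g. the link is down */
}

static const struct dev_pm_ops example_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume	 = example_runtime_resume,
	.runtime_idle	 = example_runtime_idle,
};

/* called from probe once the netdev is registered */
static void example_enable_runtime_pm(struct pci_dev *pdev)
{
	if (pci_dev_run_wake(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}
	pm_runtime_idle(&pdev->dev);
}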
@@ -3247,7 +3290,7 @@ static int rtl8169_open(struct net_device *dev) tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, &tp->TxPhyAddr); if (!tp->TxDescArray) - goto out; + goto err_pm_runtime_put; tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, &tp->RxPhyAddr); @@ -3274,6 +3317,9 @@ static int rtl8169_open(struct net_device *dev) rtl8169_request_timer(dev); + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); out: return retval; @@ -3283,9 +3329,13 @@ err_release_ring_2: err_free_rx_1: pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); + tp->RxDescArray = NULL; err_free_tx_0: pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); goto out; } @@ -3891,7 +3941,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) rtl8169_down(dev); - rtl8169_set_rxbufsize(tp, dev); + rtl8169_set_rxbufsize(tp, dev->mtu); ret = rtl8169_init_ring(dev); if (ret < 0) @@ -4429,12 +4479,20 @@ out: return done; } +/* + * Warning : rtl8169_rx_interrupt() might be called : + * 1) from NAPI (softirq) context + * (polling = 1 : we should call netif_receive_skb()) + * 2) from process context (rtl8169_reset_task()) + * (polling = 0 : we must call netif_rx() instead) + */ static int rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, void __iomem *ioaddr, u32 budget) { unsigned int cur_rx, rx_left; unsigned int delta, count; + int polling = (budget != ~(u32)0) ? 1 : 0; cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; @@ -4496,8 +4554,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev, skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); - if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) - netif_receive_skb(skb); + if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { + if (likely(polling)) + netif_receive_skb(skb); + else + netif_rx(skb); + } dev->stats.rx_bytes += pkt_size; dev->stats.rx_packets++; @@ -4692,6 +4754,8 @@ static int rtl8169_close(struct net_device *dev) struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; + pm_runtime_get_sync(&pdev->dev); + /* update counters before going down */ rtl8169_update_counters(dev); @@ -4706,6 +4770,8 @@ static int rtl8169_close(struct net_device *dev) tp->TxDescArray = NULL; tp->RxDescArray = NULL; + pm_runtime_put_sync(&pdev->dev); + return 0; } @@ -4731,12 +4797,12 @@ static void rtl_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } @@ -4754,8 +4820,8 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } - RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(RxConfig, tmp); @@ -4804,21 +4870,74 @@ static int rtl8169_suspend(struct device *device) return 0; } +static void __rtl8169_resume(struct net_device *dev) +{ + netif_device_attach(dev); + 
rtl8169_schedule_work(dev, rtl8169_reset_task); +} + static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - if (!netif_running(dev)) - goto out; + if (netif_running(dev)) + __rtl8169_resume(dev); - netif_device_attach(dev); + return 0; +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + spin_unlock_irq(&tp->lock); + + rtl8169_net_suspend(dev); - rtl8169_schedule_work(dev, rtl8169_reset_task); -out: return 0; } +static int rtl8169_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + spin_unlock_irq(&tp->lock); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl8169_check_link_status(dev, tp, tp->mmio_addr); + return -EBUSY; +} + static const struct dev_pm_ops rtl8169_pm_ops = { .suspend = rtl8169_suspend, .resume = rtl8169_resume, @@ -4826,6 +4945,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = { .thaw = rtl8169_resume, .poweroff = rtl8169_suspend, .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, }; #define RTL8169_PM_OPS (&rtl8169_pm_ops) diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index ede937ee50c..07eb884ff98 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/rio.h> #include <linux/rio_drv.h> +#include <linux/slab.h> #include <linux/rio_ids.h> #include <linux/netdevice.h> diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 266baf53496..f2e335f0d1b 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -40,6 +40,7 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/slab.h> #include <net/sock.h> #include <asm/system.h> diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index df70657260d..f155928bf14 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -79,6 +79,7 @@ #include <linux/tcp.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/slab.h> #include <net/tcp.h> #include <asm/system.h> @@ -2399,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, return NULL; } pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, - skb->len - skb->data_len, PCI_DMA_TODEVICE); + skb_headlen(skb), PCI_DMA_TODEVICE); frg_cnt = skb_shinfo(skb)->nr_frags; if (frg_cnt) { txds++; @@ -4201,7 +4202,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); } - frg_len = skb->len - skb->data_len; + frg_len = skb_headlen(skb); if (offload_type == SKB_GSO_UDP) { int ufo_size; @@ -4964,7 +4965,7 @@ static struct net_device_stats 
*s2io_get_stats(struct net_device *dev) static void s2io_set_multicast(struct net_device *dev) { int i, j, prev_cnt; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct s2io_nic *sp = netdev_priv(dev); struct XENA_dev_config __iomem *bar0 = sp->bar0; u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = @@ -5093,12 +5094,12 @@ static void s2io_set_multicast(struct net_device *dev) /* Create the new Rx filter list and update the same in H/W. */ i = 0; - netdev_for_each_mc_addr(mclist, dev) { - memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, + netdev_for_each_mc_addr(ha, dev) { + memcpy(sp->usr_addrs[i].addr, ha->addr, ETH_ALEN); mac_addr = 0; for (j = 0; j < ETH_ALEN; j++) { - mac_addr |= mclist->dmi_addr[j]; + mac_addr |= ha->addr[j]; mac_addr <<= 8; } mac_addr >>= 8; @@ -5819,10 +5820,8 @@ static void s2io_vpd_read(struct s2io_nic *nic) } } - if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { - memset(nic->product_name, 0, vpd_data[1]); + if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); - } kfree(vpd_data); swstats->mem_freed += 256; } diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c index 45f26344b36..6b12524ad7c 100644 --- a/drivers/net/s6gmac.c +++ b/drivers/net/s6gmac.c @@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev) } else { skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) & S6_GMAC_BURST_POSTRD_LEN_MASK); - skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx(skb); diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 9f83a119737..abc8eefdd4b 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -42,7 +42,6 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n"; #include <linux/errno.h> #include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */ #include <linux/in.h> -#include <linux/slab.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/if_arp.h> @@ -52,6 +51,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n"; #include <linux/pnp.h> #include <linux/init.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/processor.h> diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 9944e5d662c..332031747a2 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -2112,7 +2112,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc) uint64_t reg; void __iomem *port; int idx; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct net_device *dev = sc->sbm_dev; /* @@ -2161,10 +2161,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc) * XXX if the table overflows */ idx = 1; /* skip station address */ - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (idx == MAC_ADDR_COUNT) break; - reg = sbmac_addr2reg(mclist->dmi_addr); + reg = sbmac_addr2reg(ha->addr); port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); __raw_writeq(reg, port); idx++; @@ -2664,7 +2664,6 @@ static int sbmac_close(struct net_device *dev) static int sbmac_poll(struct napi_struct *napi, int budget) { struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); - struct net_device *dev = sc->sbm_dev; int work_done; work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index d87c4787fff..1b326058893 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -433,13 +433,13 @@ static 
void _sc92031_set_mar(struct net_device *dev) (dev->flags & IFF_ALLMULTI)) mar0 = mar1 = 0xffffffff; else if (dev->flags & IFF_MULTICAST) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_list, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 crc; unsigned bit = 0; - crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr); + crc = ~ether_crc(ETH_ALEN, ha->addr); crc >>= 24; if (crc & 0x01) bit |= 0x02; diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index fe806bd9b95..374832cca11 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -37,7 +37,6 @@ static const char version[] = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/delay.h> diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 88f2fb193ab..1ad61b7bba4 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -20,6 +20,7 @@ #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/topology.h> +#include <linux/gfp.h> #include "net_driver.h" #include "efx.h" #include "mdio_10g.h" @@ -1602,7 +1603,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) static void efx_set_multicast_list(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; union efx_multicast_hash *mc_hash = &efx->multicast_hash; u32 crc; int bit; @@ -1614,8 +1615,8 @@ static void efx_set_multicast_list(struct net_device *net_dev) memset(mc_hash, 0xff, sizeof(*mc_hash)); } else { memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(mc_list, net_dev) { - crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); set_bit_le(bit, mc_hash->byte); } diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 1b8d83657aa..d294d66fd60 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -15,6 +15,7 @@ #include <linux/seq_file.h> #include <linux/i2c.h> #include <linux/mii.h> +#include <linux/slab.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c index 34c22fa986e..2f235469666 100644 --- a/drivers/net/sfc/mcdi_phy.c +++ b/drivers/net/sfc/mcdi_phy.c @@ -11,6 +11,7 @@ * Driver for PHY related operations via MCDI. 
*/ +#include <linux/slab.h> #include "efx.h" #include "phy.h" #include "mcdi.h" diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index 407bbaddfea..f3ac7f30b5e 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/delay.h> +#include <linux/slab.h> #include <linux/rtnetlink.h> #define EFX_DRIVER_NAME "sfc_mtd" diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index 1bee62c8300..e077bef08a5 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c @@ -10,6 +10,7 @@ * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details */ +#include <linux/slab.h> #include <linux/timer.h> #include <linux/delay.h> #include "efx.h" diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h index 89d606fe924..18a3be42834 100644 --- a/drivers/net/sfc/regs.h +++ b/drivers/net/sfc/regs.h @@ -95,7 +95,7 @@ #define FRF_AA_INT_ACK_KER_FIELD_LBN 0 #define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 -/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */ +/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ #define FR_BZ_INT_ISR0 0x00000090 #define FRF_BZ_INT_ISR_REG_LBN 0 #define FRF_BZ_INT_ISR_REG_WIDTH 64 diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index a97c923b560..e308818b9f5 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -10,6 +10,7 @@ #include <linux/socket.h> #include <linux/in.h> +#include <linux/slab.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index cf0139a7d9a..0106b1d9aae 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c @@ -18,6 +18,7 @@ #include <linux/in.h> #include <linux/udp.h> #include <linux/rtnetlink.h> +#include <linux/slab.h> #include <asm/io.h> #include "net_driver.h" #include "efx.h" diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 1619fb5a64f..38dcc42c4f7 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> +#include <linux/slab.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index 10db071bd83..f21efe7bd31 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/rtnetlink.h> #include <linux/seq_file.h> +#include <linux/slab.h> #include "efx.h" #include "mdio_10g.h" #include "nic.h" diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index a8b70ef6d81..be0e110a1f7 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -13,6 +13,7 @@ #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> +#include <linux/slab.h> #include <net/ipv6.h> #include <linux/if_ether.h> #include <linux/highmem.h> diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index ed999d31f1f..c8fc896fc46 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/types.h> @@ -592,8 +593,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup... 
*/ len = skb->len; if (len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) + if (skb_padto(skb, ETH_ZLEN)) { + spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; + } len = ETH_ZLEN; } diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 42a35f086a9..6242b85d5d1 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -31,6 +31,7 @@ #include <linux/cache.h> #include <linux/io.h> #include <linux/pm_runtime.h> +#include <linux/slab.h> #include <asm/cacheflush.h> #include "sh_eth.h" diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 760d9e83a46..a5d6a6bd0c1 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -32,6 +32,7 @@ #include <linux/delay.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <asm/irq.h> #define PHY_MAX_ADDR 32 @@ -848,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { int bit_nr = - ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; + ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index cc0c731c4f0..6293592635b 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -1499,7 +1499,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex } if(netif_msg_link(sis_priv)) - printk(KERN_INFO "%s: Media Link On %s %s-duplex \n", + printk(KERN_INFO "%s: Media Link On %s %s-duplex\n", net_dev->name, *speed == HW_SPEED_100_MBPS ? "100mbps" : "10mbps", @@ -1523,7 +1523,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) int i; if(netif_msg_tx_err(sis_priv)) - printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n", + printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); /* Disable interrupts by clearing the interrupt mask. 
*/ @@ -2298,12 +2298,14 @@ static void set_rx_mode(struct net_device *net_dev) /* Accept Broadcast packet, destination address matchs our * MAC address, use Receive Filter to reject unwanted MCAST * packets */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = RFAAB; - netdev_for_each_mc_addr(mclist, net_dev) { - unsigned int bit_nr = - sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev); + netdev_for_each_mc_addr(ha, net_dev) { + unsigned int bit_nr; + + bit_nr = sis900_mcast_bitnr(ha->addr, + sis_priv->chipset_rev); mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); } } diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c index a85efcfd9d0..e8387d25f24 100644 --- a/drivers/net/skfp/ess.c +++ b/drivers/net/skfp/ess.c @@ -557,7 +557,7 @@ static void ess_send_alc_req(struct s_smc *smc) /* * send never allocation request where the requested payload and - * overhead is zero or deallocate bandwidht when no bandwidth is + * overhead is zero or deallocate bandwidth when no bandwidth is * parsed */ if (!smc->mib.fddiESSPayload) { diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c index 6028bbb3b28..9d8d1ac4817 100644 --- a/drivers/net/skfp/fplustm.c +++ b/drivers/net/skfp/fplustm.c @@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc) /* * MIB timer and hardware timer have the same resolution of 80nS */ - DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n", + DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n", (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ; outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ; } diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c index e6b33ee05ed..ba45bc794d7 100644 --- a/drivers/net/skfp/pcmplc.c +++ b/drivers/net/skfp/pcmplc.c @@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy) mib = phy->mib ; - DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ; + DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ; bit++ ; switch(bit) { @@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; break ; } - DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ; + DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ; } /* diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 1921a54ea99..7912606b0bc 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -78,13 +78,13 @@ static const char * const boot_msg = #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/fddidevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -852,7 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev) static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) { struct s_smc *smc = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; /* Enable promiscuous mode, if necessary */ if (dev->flags & IFF_PROMISC) { @@ -876,13 +876,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) /* use exact filtering */ // point to first multicast addr - netdev_for_each_mc_addr(dmi, dev) { - mac_add_multicast(smc, - (struct fddi_addr *)dmi->dmi_addr, - 1); + netdev_for_each_mc_addr(ha, dev) { + mac_add_multicast(smc, + (struct fddi_addr *)ha->addr, + 1); 
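/*
 * Aside: the most common change in this section is the conversion of
 * multicast list walks from struct dev_mc_list / dmi_addr to
 * netdev_for_each_mc_addr() over struct netdev_hw_addr.  A small
 * self-contained sketch of the new-style loop building a 64-bin CRC
 * hash filter follows; the helper name and the exact bucket math are
 * illustrative only and differ from driver to driver above and below.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void example_build_mc_hash(struct net_device *dev, u32 hash[2])
{
	struct netdev_hw_addr *ha;

	hash[0] = hash[1] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		/* upper 6 bits of the Ethernet CRC select one of 64 bins */
		int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

		hash[bit >> 5] |= 1 << (bit & 31);
	}
}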
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n", - dmi->dmi_addr); + ha->addr); } } else { // more MC addresses than HW supports diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c index 6caf713b744..40882b3faba 100644 --- a/drivers/net/skfp/srf.c +++ b/drivers/net/skfp/srf.c @@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc) smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; - DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ; + DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ; DB_SMT("SRF: state SR%d Threshold %d\n", smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; #ifdef DEBUG diff --git a/drivers/net/skge.c b/drivers/net/skge.c index d0058e5bb6a..96eee866687 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -42,6 +42,7 @@ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/mii.h> +#include <linux/slab.h> #include <asm/irq.h> #include "skge.h" @@ -2917,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u32 mode; u8 filter[8]; @@ -2937,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev) skge->flow_status == FLOW_STAT_SYMMETRIC) genesis_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - genesis_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); } xm_write32(hw, port, XM_MODE, mode); @@ -2956,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || skge->flow_status == FLOW_STAT_SYMMETRIC); u16 reg; @@ -2979,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev) if (rx_pause) yukon_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - yukon_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); } @@ -3666,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v) t->csum_offs, t->csum_write, t->csum_start); } - seq_printf(seq, "\nRx Ring: \n"); + seq_printf(seq, "\nRx Ring:\n"); for (e = skge->rx_ring.to_clean; ; e = e->next) { const struct skge_rx_desc *r = e->desc; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 653bdd76ef4..3a086d3a7cb 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -33,6 +33,7 @@ #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/ip.h> +#include <linux/slab.h> #include <net/ip.h> #include <linux/tcp.h> #include <linux/in.h> @@ -226,7 +227,7 @@ static void sky2_power_on(struct sky2_hw *hw) /* disable Core Clock Division, */ sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) /* enable bits are inverted */ sky2_write8(hw, B2_Y2_CLK_GATE, Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | @@ -268,7 +269,7 @@ static void sky2_power_on(struct sky2_hw *hw) static void sky2_power_aux(struct sky2_hw *hw) { - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) sky2_write8(hw, B2_Y2_CLK_GATE, 0); else /* enable bits are 
inverted */ @@ -651,7 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 &= ~phy_power[port]; - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) reg1 |= coma_mode[port]; sky2_pci_write32(hw, PCI_DEV_REG1, reg1); @@ -823,7 +824,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) { + if (hw->chip_id == CHIP_ID_YUKON_XL && + hw->chip_rev == CHIP_REV_YU_XL_A0 && + port == 1) { /* WA DEV_472 -- looks like crossed wires on port 2 */ /* clear GMAC 1 Control reset */ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); @@ -877,6 +880,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_B1) + reg |= GM_NEW_FLOW_CTRL; + gma_write16(hw, port, GM_SERIAL_MODE, reg); /* virtual address for data */ @@ -1413,8 +1420,7 @@ static void sky2_rx_start(struct sky2_port *sky2) /* These chips have no ram buffer? * MAC Rx RAM Read is controlled by hardware */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && - (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || - hw->chip_rev == CHIP_REV_YU_EC_U_B0)) + hw->chip_rev > CHIP_REV_YU_EC_U_A0) sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); @@ -2141,7 +2147,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) istatus, phystat); if (istatus & PHY_M_IS_AN_COMPL) { - if (sky2_autoneg_done(sky2, phystat) == 0) + if (sky2_autoneg_done(sky2, phystat) == 0 && + !netif_carrier_ok(dev)) sky2_link_up(sky2); goto out; } @@ -3621,7 +3628,7 @@ static void sky2_set_multicast(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u16 reg; u8 filter[8]; int rx_pause; @@ -3645,8 +3652,8 @@ static void sky2_set_multicast(struct net_device *dev) if (rx_pause) sky2_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - sky2_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + sky2_add_filter(filter, ha->addr); } gma_write16(hw, port, GM_MC_ADDR_H1, @@ -4863,6 +4870,7 @@ static int sky2_resume(struct pci_dev *pdev) if (!hw) return 0; + rtnl_lock(); err = pci_set_power_state(pdev, PCI_D0); if (err) goto out; @@ -4884,7 +4892,6 @@ static int sky2_resume(struct pci_dev *pdev) sky2_write32(hw, B0_IMSK, Y2_IS_BASE); napi_enable(&hw->napi); - rtnl_lock(); for (i = 0; i < hw->ports; i++) { err = sky2_reattach(hw->dev[i]); if (err) diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index a5e182dd981..0bebfb3638f 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -548,6 +548,14 @@ enum { CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ }; + +enum yukon_xl_rev { + CHIP_REV_YU_XL_A0 = 0, + CHIP_REV_YU_XL_A1 = 1, + CHIP_REV_YU_XL_A2 = 2, + CHIP_REV_YU_XL_A3 = 3, +}; + enum yukon_ec_rev { CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. 
for Yukon-EC A2 */ @@ -557,6 +565,7 @@ enum yukon_ec_u_rev { CHIP_REV_YU_EC_U_A0 = 1, CHIP_REV_YU_EC_U_A1 = 2, CHIP_REV_YU_EC_U_B0 = 3, + CHIP_REV_YU_EC_U_B1 = 5, }; enum yukon_fe_rev { CHIP_REV_YU_FE_A1 = 1, @@ -1775,10 +1784,13 @@ enum { /* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ enum { GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ - GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ - GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ - GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ - GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ + GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */ + + GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */ + + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ }; #define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c index d640c0f5470..140d63f3caf 100644 --- a/drivers/net/slhc.c +++ b/drivers/net/slhc.c @@ -51,6 +51,7 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> diff --git a/drivers/net/slip.c b/drivers/net/slip.c index ba5bbc50344..89696156c05 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -83,6 +83,7 @@ #include <linux/compat.h> #include <linux/delay.h> #include <linux/init.h> +#include <linux/slab.h> #include "slip.h" #ifdef CONFIG_INET #include <linux/ip.h> diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 9871a2b61f8..1e49fcfa28a 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -59,7 +59,6 @@ static const char version[] = #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/errno.h> @@ -383,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev) DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", dev->name, __func__); status = SMC_GET_RX_STS_FIFO(lp); - DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", + DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n", dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); pkt_len = (status & RX_STS_PKT_LEN_) >> 16; if (status & RX_STS_ES_) { @@ -1136,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) } #else if (status & INT_STS_TSFL_) { - DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, ); + DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, ); smc911x_tx(dev); SMC_ACK_INT(lp, INT_STS_TSFL_); } @@ -1275,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev) status = SMC_GET_INT(lp); mask = SMC_GET_INT_EN(lp); spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n", + DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n", dev->name, status, mask); /* Dump the current TX FIFO contents and restart */ @@ -1341,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) * within that register. 
*/ else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* Set the Hash perfec mode */ mcr |= MAC_CR_HPFILT_; @@ -1349,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev) /* start with a table of all zeros: reject all */ memset(multicast_table, 0, sizeof(multicast_table)); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 position; - /* do we have a pointer here? */ - if (!cur_addr) - break; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? */ - if (!(*cur_addr->dmi_addr & 1)) - continue; + if (!(*ha->addr & 1)) + continue; /* upper 6 bits are used as hash index */ - position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; + position = ether_crc(ETH_ALEN, ha->addr)>>26; multicast_table[position>>5] |= 1 << (position&0x1f); } diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index f9a960e7fc1..e94521cf70a 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c @@ -64,7 +64,6 @@ static const char version[] = #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/crc32.h> @@ -417,7 +416,7 @@ static void smc_shutdown( int ioaddr ) /* - . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds ) + . Function: smc_setmulticast( int ioaddr, struct net_device *dev ) . Purpose: . This sets the internal hardware table to filter out unwanted multicast . packets before they take up memory. @@ -438,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev) { int i; unsigned char multicast_table[ 8 ]; - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* table for flipping the order of 3 bits */ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; /* start with a table of all zeros: reject all */ memset( multicast_table, 0, sizeof( multicast_table ) ); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { int position; - /* do we have a pointer here? */ - if ( !cur_addr ) - break; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? */ - if ( !( *cur_addr->dmi_addr & 1 ) ) + if (!(*ha->addr & 1)) continue; /* only use the low order bits */ - position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f; + position = ether_crc_le(6, ha->addr) & 0x3f; /* do some messy swapping to put the bit in the right spot */ multicast_table[invert3[position&7]] |= @@ -529,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, numPages = ((length & 0xfffe) + 6) / 256; if (numPages > 7 ) { - printk(CARDNAME": Far too big packet error. \n"); + printk(CARDNAME": Far too big packet error.\n"); /* freeing the packet is a good thing here... but should . any packets of this size get down here? */ dev_kfree_skb (skb); @@ -571,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, if ( !time_out ) { /* oh well, wait until the chip finds memory later */ SMC_ENABLE_INT( IM_ALLOC_INT ); - PRINTK2((CARDNAME": memory allocation deferred. \n")); + PRINTK2((CARDNAME": memory allocation deferred.\n")); /* it's deferred, but I'll handle it later */ - return NETDEV_TX_OK; + return NETDEV_TX_OK; } /* or YES! I can send the packet now.. 
*/ smc_hardware_send_packet(dev); @@ -611,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) ioaddr = dev->base_addr; if ( !skb ) { - PRINTK((CARDNAME": In XMIT with no packet to send \n")); + PRINTK((CARDNAME": In XMIT with no packet to send\n")); return; } length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; @@ -621,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) packet_no = inb( ioaddr + PNR_ARR + 1 ); if ( packet_no & 0x80 ) { /* or isn't there? BAD CHIP! */ - printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n"); + printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n"); dev_kfree_skb_any(skb); lp->saved_skb = NULL; netif_wake_queue(dev); @@ -686,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) /* and let the chipset deal with it */ outw( MC_ENQUEUE , ioaddr + MMU_CMD ); - PRINTK2((CARDNAME": Sent packet of length %d \n",length)); + PRINTK2((CARDNAME": Sent packet of length %d\n", length)); lp->saved_skb = NULL; dev_kfree_skb_any (skb); @@ -938,7 +934,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { /* I don't recognize this chip, so... */ printk(CARDNAME ": IO %x: Unrecognized revision register:" - " %x, Contact author. \n", ioaddr, revision_register ); + " %x, Contact author.\n", ioaddr, revision_register); retval = -ENODEV; goto err_out; @@ -1075,7 +1071,7 @@ static void print_packet( byte * buf, int length ) int remainder; int lines; - printk("Packet of length %d \n", length ); + printk("Packet of length %d\n", length); lines = length / 16; remainder = length % 16; @@ -1202,7 +1198,7 @@ static void smc_rcv(struct net_device *dev) if ( packet_number & FP_RXEMPTY ) { /* we got called , but nothing was on the FIFO */ - PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n")); + PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n")); /* don't need to restore anything */ return; } @@ -1258,14 +1254,14 @@ static void smc_rcv(struct net_device *dev) to send the DWORDs or the bytes first, or some mixture. 
A mixture might improve already slow PIO performance */ - PRINTK3((" Reading %d dwords (and %d bytes) \n", + PRINTK3((" Reading %d dwords (and %d bytes)\n", packet_length >> 2, packet_length & 3 )); insl(ioaddr + DATA_1 , data, packet_length >> 2 ); /* read the left over bytes */ insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), packet_length & 0x3 ); #else - PRINTK3((" Reading %d words and %d byte(s) \n", + PRINTK3((" Reading %d words and %d byte(s)\n", (packet_length >> 1 ), packet_length & 1 )); insw(ioaddr + DATA_1 , data, packet_length >> 1); if ( packet_length & 1 ) { @@ -1334,7 +1330,7 @@ static void smc_tx( struct net_device * dev ) outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); tx_status = inw( ioaddr + DATA_1 ); - PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); + PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status)); dev->stats.tx_errors++; if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; @@ -1348,7 +1344,7 @@ static void smc_tx( struct net_device * dev ) #endif if ( tx_status & TS_SUCCESS ) { - printk(CARDNAME": Successful packet caused interrupt \n"); + printk(CARDNAME": Successful packet caused interrupt\n"); } /* re-enable transmit */ SMC_SELECT_BANK( 0 ); @@ -1394,7 +1390,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) int handled = 0; - PRINTK3((CARDNAME": SMC interrupt started \n")); + PRINTK3((CARDNAME": SMC interrupt started\n")); saved_bank = inw( ioaddr + BANK_SELECT ); @@ -1409,7 +1405,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) /* set a timeout value, so I don't stay here forever */ timeout = 4; - PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask )); + PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask)); do { /* read the status flag, and mask it */ status = inb( ioaddr + INTERRUPT ) & mask; @@ -1419,7 +1415,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) handled = 1; PRINTK3((KERN_WARNING CARDNAME - ": Handling interrupt status %x \n", status )); + ": Handling interrupt status %x\n", status)); if (status & IM_RCV_INT) { /* Got a packet(s). 
*/ @@ -1453,7 +1449,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) } else if (status & IM_ALLOC_INT ) { PRINTK2((KERN_DEBUG CARDNAME - ": Allocation interrupt \n")); + ": Allocation interrupt\n")); /* clear this interrupt so it doesn't happen again */ mask &= ~IM_ALLOC_INT; @@ -1471,9 +1467,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) dev->stats.rx_fifo_errors++; outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); } else if (status & IM_EPH_INT ) { - PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); + PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n")); } else if (status & IM_ERCV_INT ) { - PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n")); + PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n")); outb( IM_ERCV_INT, ioaddr + INTERRUPT ); } } while ( timeout -- ); @@ -1483,7 +1479,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) SMC_SELECT_BANK( 2 ); outb( mask, ioaddr + INT_MASK ); - PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask )); + PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask)); outw( saved_pointer, ioaddr + POINTER ); SMC_SELECT_BANK( saved_bank ); diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index fc1b5a1a358..682bc4fe604 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -70,7 +70,6 @@ static const char version[] = #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/errno.h> @@ -1286,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) smc_phy_interrupt(dev); } else if (status & IM_ERCV_INT) { SMC_ACK_INT(lp, IM_ERCV_INT); - PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name); + PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name); } } while (--timeout); @@ -1413,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev) * within that register. */ else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* table for flipping the order of 3 bits */ static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; @@ -1421,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev) /* start with a table of all zeros: reject all */ memset(multicast_table, 0, sizeof(multicast_table)); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { int position; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? 
*/ - if (!(*cur_addr->dmi_addr & 1)) + if (!(*ha->addr & 1)) continue; /* only use the low order bits */ - position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; + position = crc32_le(~0, ha->addr, 6) & 0x3f; /* do some messy swapping to put the bit in the right spot */ multicast_table[invert3[position&7]] |= diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index a6ee883d1b0..8d2772cc42f 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -344,6 +344,34 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH +#elif defined(CONFIG_COLDFIRE) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +static inline void mcf_insw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + *wp++ = readw(a); +} + +static inline void mcf_outsw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + writew(*wp++, a); +} + +#define SMC_inw(a, r) _swapw(readw((a) + (r))) +#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r)) +#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) +#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) + +#define SMC_IRQ_FLAGS (IRQF_DISABLED) + #else /* diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 4fd1d8b3878..746fb91a0fb 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -41,7 +41,6 @@ #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/timer.h> #include <linux/bug.h> #include <linux/bitops.h> @@ -1383,13 +1382,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev) /* Enabling specific multicast addresses */ unsigned int hash_high = 0; unsigned int hash_low = 0; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; pdata->set_bits_mask = MAC_CR_HPFILT_; pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); - netdev_for_each_mc_addr(mc_list, dev) { - unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); unsigned int mask = 0x01 << (bitnum & 0x1F); if (bitnum & 0x20) diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index 30110a11d73..ada05c45217 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -26,6 +26,7 @@ #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/crc32.h> +#include <linux/slab.h> #include <asm/unaligned.h> #include "smsc9420.h" @@ -1063,12 +1064,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev) mac_cr |= MAC_CR_MCPAS_; mac_cr &= (~MAC_CR_HPFILT_); } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 hash_lo = 0, hash_hi = 0; smsc_dbg(HW, "Multicast filter enabled"); - netdev_for_each_mc_addr(mc_list, dev) { - u32 bit_num = smsc9420_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 bit_num = smsc9420_hash(ha->addr); u32 mask = 1 << (bit_num & 0x1F); if (bit_num & 0x20) @@ -1347,7 +1348,7 @@ static int smsc9420_open(struct net_device *dev) netif_carrier_off(dev); - /* disable, mask and acknowlege all interrupts */ + /* disable, mask and acknowledge all interrupts */ spin_lock_irqsave(&pd->int_lock, flags); int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); smsc9420_reg_write(pd, INT_CFG, int_cfg); diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c index 854ccf2b410..6b2a8881747 100644 --- a/drivers/net/sni_82596.c +++ 
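/*
 * Sketch of the 64-bit multicast hash used by smc_set_multicast_list()
 * above: the low six bits of the little-endian CRC pick one of 64 filter
 * bits, and the invert3[] table reverses each 3-bit group because the chip
 * expects the bit order flipped.  The helper name is illustrative only.
 */
static int smc91x_hash_byte(const u8 *addr)
{
        static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
        int position = crc32_le(~0, addr, 6) & 0x3f;    /* 6-bit hash */

        return invert3[position & 7];   /* byte index into the 8-byte table */
}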
b/drivers/net/sni_82596.c @@ -8,7 +8,6 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c index 287c251075e..e5d67327d70 100644 --- a/drivers/net/sonic.c +++ b/drivers/net/sonic.c @@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); unsigned int rcr; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *addr; int i; @@ -550,8 +550,8 @@ static void sonic_multicast_list(struct net_device *dev) netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; - netdev_for_each_mc_addr(dmi, dev) { - addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addr = ha->addr; sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 2f8a8c32021..3dff280b438 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -31,6 +31,7 @@ #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/init.h> +#include <linux/gfp.h> #include <linux/ioport.h> #include <linux/ip.h> #include <linux/kernel.h> @@ -40,7 +41,6 @@ #include <linux/device.h> #include <linux/pci.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/vmalloc.h> @@ -474,7 +474,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, * spider_net_enable_rxchtails - sets RX dmac chain tail addresses * @card: card structure * - * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the + * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the * chip by writing to the appropriate register. DMA is enabled in * spider_net_enable_rxdmac. */ @@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) static void spider_net_set_multi(struct net_device *netdev) { - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u8 hash; int i; u32 reg; @@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev) hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ set_bit(0xfd, bitmask); - netdev_for_each_mc_addr(mc, netdev) { - hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash = spider_net_get_multicast_hash(netdev, ha->addr); set_bit(hash, bitmask); } @@ -1820,7 +1820,7 @@ spider_net_enable_card(struct spider_net_card *card) spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); - /* set chain tail adress for RX chains and + /* set chain tail address for RX chains and * enable DMA */ spider_net_enable_rxchtails(card); spider_net_enable_rxdmac(card); diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 6dfa6989901..8a6d27cdc0b 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -1766,7 +1766,7 @@ static void set_rx_mode(struct net_device *dev) struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u32 rx_mode = MinVLANPrio; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; #ifdef VLAN_SUPPORT @@ -1804,8 +1804,8 @@ static void set_rx_mode(struct net_device *dev) /* Use the 16 element perfect filter, skip first two entries. 
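/*
 * Sketch of the CAM word packing done in sonic_multicast_list() above: each
 * multicast address occupies three 16-bit CAM data ports, packed low byte
 * first.  The helper is illustrative; the driver writes the words through
 * sonic_cda_put() directly.
 */
static void sonic_pack_cam_entry(u16 cap[3], const u8 *addr)
{
        cap[0] = addr[1] << 8 | addr[0];        /* SONIC_CD_CAP0 */
        cap[1] = addr[3] << 8 | addr[2];        /* SONIC_CD_CAP1 */
        cap[2] = addr[5] << 8 | addr[4];        /* SONIC_CD_CAP2 */
}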
*/ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; __be16 *eaddrs; - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (__be16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (__be16 *) ha->addr; writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; @@ -1825,10 +1825,10 @@ static void set_rx_mode(struct net_device *dev) __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { /* The chip uses the upper 9 CRC bits as index into the hash table */ - int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; + int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23; __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; *fptr |= cpu_to_le32(1 << (bit_nr & 31)); diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig index fb287649a30..eb63d44748a 100644 --- a/drivers/net/stmmac/Kconfig +++ b/drivers/net/stmmac/Kconfig @@ -2,6 +2,7 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" select MII select PHYLIB + select CRC32 depends on NETDEVICES && CPU_SUBTYPE_ST40 help This is the driver for the Ethernet IPs are built around a diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile index c776af15fe1..9691733ddb8 100644 --- a/drivers/net/stmmac/Makefile +++ b/drivers/net/stmmac/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ - dwmac100.o $(stmmac-y) + dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y) diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h index 2a58172e986..144f76fd3e3 100644 --- a/drivers/net/stmmac/common.h +++ b/drivers/net/stmmac/common.h @@ -22,8 +22,26 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#include "descs.h" #include <linux/netdevice.h> +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define STMMAC_VLAN_TAG_USED +#include <linux/if_vlan.h> +#endif + +#include "descs.h" + +#undef CHIP_DEBUG_PRINT +/* Turn-on extra printk debug for MAC core, dma and descriptors */ +/* #define CHIP_DEBUG_PRINT */ + +#ifdef CHIP_DEBUG_PRINT +#define CHIP_DBG(fmt, args...) printk(fmt, ## args) +#else +#define CHIP_DBG(fmt, args...) do { } while (0) +#endif + +#undef FRAME_FILTER_DEBUG +/* #define FRAME_FILTER_DEBUG */ struct stmmac_extra_stats { /* Transmit errors */ @@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], unsigned int high, unsigned int low); extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, unsigned int high, unsigned int low); +extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr); diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c deleted file mode 100644 index 803b0373d84..00000000000 --- a/drivers/net/stmmac/dwmac100.c +++ /dev/null @@ -1,537 +0,0 @@ -/******************************************************************************* - This is the driver for the MAC 10/100 on-chip Ethernet controller - currently tested on all the ST boards based on STb7109 and stx7200 SoCs. 
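/*
 * Usage sketch for the CHIP_DBG() macro introduced in common.h above: with
 * CHIP_DEBUG_PRINT left undefined the call expands to an empty
 * do { } while (0), so the extra core/dma/descriptor tracing costs nothing
 * in production builds.  The helper and register offset are illustrative.
 */
static inline void stmmac_trace_csr(unsigned long ioaddr, unsigned long off)
{
        CHIP_DBG(KERN_DEBUG "CSR @0x%lx: 0x%08x\n", off, readl(ioaddr + off));
}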
- - DWC Ether MAC 10/100 Universal version 4.0 has been used for developing - this code. - - Copyright (C) 2007-2009 STMicroelectronics Ltd - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> -*******************************************************************************/ - -#include <linux/crc32.h> -#include <linux/mii.h> -#include <linux/phy.h> - -#include "common.h" -#include "dwmac100.h" -#include "dwmac_dma.h" - -#undef DWMAC100_DEBUG -/*#define DWMAC100_DEBUG*/ -#ifdef DWMAC100_DEBUG -#define DBG(fmt, args...) printk(fmt, ## args) -#else -#define DBG(fmt, args...) do { } while (0) -#endif - -static void dwmac100_core_init(unsigned long ioaddr) -{ - u32 value = readl(ioaddr + MAC_CONTROL); - - writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); - -#ifdef STMMAC_VLAN_TAG_USED - writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); -#endif - return; -} - -static void dwmac100_dump_mac_regs(unsigned long ioaddr) -{ - pr_info("\t----------------------------------------------\n" - "\t DWMAC 100 CSR (base addr = 0x%8x)\n" - "\t----------------------------------------------\n", - (unsigned int)ioaddr); - pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, - readl(ioaddr + MAC_CONTROL)); - pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, - readl(ioaddr + MAC_ADDR_HIGH)); - pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, - readl(ioaddr + MAC_ADDR_LOW)); - pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", - MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); - pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", - MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); - pr_info("\tflow control (offset 0x%x): 0x%08x\n", - MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); - pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, - readl(ioaddr + MAC_VLAN1)); - pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, - readl(ioaddr + MAC_VLAN2)); - pr_info("\n\tMAC management counter registers\n"); - pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", - MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); - pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", - MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); - pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", - MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); - pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", - MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); - pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", - MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); - return; -} - -static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, - u32 dma_rx) -{ - u32 value = readl(ioaddr + DMA_BUS_MODE); - /* DMA SW reset */ - value |= DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + DMA_BUS_MODE); - do {} while ((readl(ioaddr + DMA_BUS_MODE) & 
DMA_BUS_MODE_SFT_RESET)); - - /* Enable Application Access by writing to DMA CSR0 */ - writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), - ioaddr + DMA_BUS_MODE); - - /* Mask interrupts by writing to CSR7 */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); - - /* The base address of the RX/TX descriptor lists must be written into - * DMA CSR3 and CSR4, respectively. */ - writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); - writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); - - return 0; -} - -/* Store and Forward capability is not used at all.. - * The transmit threshold can be programmed by - * setting the TTC bits in the DMA control register.*/ -static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode, - int rxmode) -{ - u32 csr6 = readl(ioaddr + DMA_CONTROL); - - if (txmode <= 32) - csr6 |= DMA_CONTROL_TTC_32; - else if (txmode <= 64) - csr6 |= DMA_CONTROL_TTC_64; - else - csr6 |= DMA_CONTROL_TTC_128; - - writel(csr6, ioaddr + DMA_CONTROL); - - return; -} - -static void dwmac100_dump_dma_regs(unsigned long ioaddr) -{ - int i; - - DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n"); - for (i = 0; i < 9; i++) - pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, - (DMA_BUS_MODE + i * 4), - readl(ioaddr + DMA_BUS_MODE + i * 4)); - DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", - DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); - DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", - DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); - return; -} - -/* DMA controller has two counters to track the number of - * the receive missed frames. */ -static void dwmac100_dma_diagnostic_fr(void *data, - struct stmmac_extra_stats *x, - unsigned long ioaddr) -{ - struct net_device_stats *stats = (struct net_device_stats *)data; - u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); - - if (unlikely(csr8)) { - if (csr8 & DMA_MISSED_FRAME_OVE) { - stats->rx_over_errors += 0x800; - x->rx_overflow_cntr += 0x800; - } else { - unsigned int ove_cntr; - ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); - stats->rx_over_errors += ove_cntr; - x->rx_overflow_cntr += ove_cntr; - } - - if (csr8 & DMA_MISSED_FRAME_OVE_M) { - stats->rx_missed_errors += 0xffff; - x->rx_missed_cntr += 0xffff; - } else { - unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); - stats->rx_missed_errors += miss_f; - x->rx_missed_cntr += miss_f; - } - } - return; -} - -static int dwmac100_get_tx_frame_status(void *data, - struct stmmac_extra_stats *x, - struct dma_desc *p, unsigned long ioaddr) -{ - int ret = 0; - struct net_device_stats *stats = (struct net_device_stats *)data; - - if (unlikely(p->des01.tx.error_summary)) { - if (unlikely(p->des01.tx.underflow_error)) { - x->tx_underflow++; - stats->tx_fifo_errors++; - } - if (unlikely(p->des01.tx.no_carrier)) { - x->tx_carrier++; - stats->tx_carrier_errors++; - } - if (unlikely(p->des01.tx.loss_carrier)) { - x->tx_losscarrier++; - stats->tx_carrier_errors++; - } - if (unlikely((p->des01.tx.excessive_deferral) || - (p->des01.tx.excessive_collisions) || - (p->des01.tx.late_collision))) - stats->collisions += p->des01.tx.collision_count; - ret = -1; - } - if (unlikely(p->des01.tx.heartbeat_fail)) { - x->tx_heartbeat++; - stats->tx_heartbeat_errors++; - ret = -1; - } - if (unlikely(p->des01.tx.deferred)) - x->tx_deferred++; - - return ret; -} - -static int dwmac100_get_tx_len(struct dma_desc *p) -{ - return p->des01.tx.buffer1_size; -} - -/* This function verifies if each incoming frame has some errors - * and, if required, updates the multicast statistics. 
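/*
 * Sketch of the CSR8 decoding performed by dwmac100_dma_diagnostic_fr()
 * above: each missed-frame counter carries a sticky overflow bit, so the
 * saturation value is accounted when that bit is set and the live counter
 * field is extracted otherwise.  Helper name is illustrative only.
 */
static unsigned int csr8_missed_frames(u32 csr8)
{
        if (csr8 & DMA_MISSED_FRAME_OVE_M)
                return 0xffff;                          /* counter overflowed */
        return csr8 & DMA_MISSED_FRAME_M_CNTR;          /* live count, bits 15:0 */
}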
- * In case of success, it returns csum_none becasue the device - * is not able to compute the csum in HW. */ -static int dwmac100_get_rx_frame_status(void *data, - struct stmmac_extra_stats *x, - struct dma_desc *p) -{ - int ret = csum_none; - struct net_device_stats *stats = (struct net_device_stats *)data; - - if (unlikely(p->des01.rx.last_descriptor == 0)) { - pr_warning("dwmac100 Error: Oversized Ethernet " - "frame spanned multiple buffers\n"); - stats->rx_length_errors++; - return discard_frame; - } - - if (unlikely(p->des01.rx.error_summary)) { - if (unlikely(p->des01.rx.descriptor_error)) - x->rx_desc++; - if (unlikely(p->des01.rx.partial_frame_error)) - x->rx_partial++; - if (unlikely(p->des01.rx.run_frame)) - x->rx_runt++; - if (unlikely(p->des01.rx.frame_too_long)) - x->rx_toolong++; - if (unlikely(p->des01.rx.collision)) { - x->rx_collision++; - stats->collisions++; - } - if (unlikely(p->des01.rx.crc_error)) { - x->rx_crc++; - stats->rx_crc_errors++; - } - ret = discard_frame; - } - if (unlikely(p->des01.rx.dribbling)) - ret = discard_frame; - - if (unlikely(p->des01.rx.length_error)) { - x->rx_length++; - ret = discard_frame; - } - if (unlikely(p->des01.rx.mii_error)) { - x->rx_mii++; - ret = discard_frame; - } - if (p->des01.rx.multicast_frame) { - x->rx_multicast++; - stats->multicast++; - } - return ret; -} - -static void dwmac100_irq_status(unsigned long ioaddr) -{ - return; -} - -static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, - unsigned int reg_n) -{ - stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); -} - -static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, - unsigned int reg_n) -{ - stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); -} - -static void dwmac100_set_filter(struct net_device *dev) -{ - unsigned long ioaddr = dev->base_addr; - u32 value = readl(ioaddr + MAC_CONTROL); - - if (dev->flags & IFF_PROMISC) { - value |= MAC_CONTROL_PR; - value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | - MAC_CONTROL_HP); - } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) - || (dev->flags & IFF_ALLMULTI)) { - value |= MAC_CONTROL_PM; - value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); - writel(0xffffffff, ioaddr + MAC_HASH_HIGH); - writel(0xffffffff, ioaddr + MAC_HASH_LOW); - } else if (netdev_mc_empty(dev)) { /* no multicast */ - value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | - MAC_CONTROL_HO | MAC_CONTROL_HP); - } else { - u32 mc_filter[2]; - struct dev_mc_list *mclist; - - /* Perfect filter mode for physical address and Hash - filter for multicast */ - value |= MAC_CONTROL_HP; - value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | - MAC_CONTROL_IF | MAC_CONTROL_HO); - - memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - /* The upper 6 bits of the calculated CRC are used to - * index the contens of the hash table */ - int bit_nr = - ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; - /* The most significant bit determines the register to - * use (H/L) while the other 5 bits determine the bit - * within the register. 
*/ - mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - } - writel(mc_filter[0], ioaddr + MAC_HASH_LOW); - writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); - } - - writel(value, ioaddr + MAC_CONTROL); - - DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " - "HI 0x%08x, LO 0x%08x\n", - __func__, readl(ioaddr + MAC_CONTROL), - readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); - return; -} - -static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, - unsigned int fc, unsigned int pause_time) -{ - unsigned int flow = MAC_FLOW_CTRL_ENABLE; - - if (duplex) - flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); - writel(flow, ioaddr + MAC_FLOW_CTRL); - - return; -} - -/* No PMT module supported for this Ethernet Controller. - * Tested on ST platforms only. - */ -static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode) -{ - return; -} - -static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, - int disable_rx_ic) -{ - int i; - for (i = 0; i < ring_size; i++) { - p->des01.rx.own = 1; - p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; - if (i == ring_size - 1) - p->des01.rx.end_ring = 1; - if (disable_rx_ic) - p->des01.rx.disable_ic = 1; - p++; - } - return; -} - -static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) -{ - int i; - for (i = 0; i < ring_size; i++) { - p->des01.tx.own = 0; - if (i == ring_size - 1) - p->des01.tx.end_ring = 1; - p++; - } - return; -} - -static int dwmac100_get_tx_owner(struct dma_desc *p) -{ - return p->des01.tx.own; -} - -static int dwmac100_get_rx_owner(struct dma_desc *p) -{ - return p->des01.rx.own; -} - -static void dwmac100_set_tx_owner(struct dma_desc *p) -{ - p->des01.tx.own = 1; -} - -static void dwmac100_set_rx_owner(struct dma_desc *p) -{ - p->des01.rx.own = 1; -} - -static int dwmac100_get_tx_ls(struct dma_desc *p) -{ - return p->des01.tx.last_segment; -} - -static void dwmac100_release_tx_desc(struct dma_desc *p) -{ - int ter = p->des01.tx.end_ring; - - /* clean field used within the xmit */ - p->des01.tx.first_segment = 0; - p->des01.tx.last_segment = 0; - p->des01.tx.buffer1_size = 0; - - /* clean status reported */ - p->des01.tx.error_summary = 0; - p->des01.tx.underflow_error = 0; - p->des01.tx.no_carrier = 0; - p->des01.tx.loss_carrier = 0; - p->des01.tx.excessive_deferral = 0; - p->des01.tx.excessive_collisions = 0; - p->des01.tx.late_collision = 0; - p->des01.tx.heartbeat_fail = 0; - p->des01.tx.deferred = 0; - - /* set termination field */ - p->des01.tx.end_ring = ter; - - return; -} - -static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag) -{ - p->des01.tx.first_segment = is_fs; - p->des01.tx.buffer1_size = len; -} - -static void dwmac100_clear_tx_ic(struct dma_desc *p) -{ - p->des01.tx.interrupt = 0; -} - -static void dwmac100_close_tx_desc(struct dma_desc *p) -{ - p->des01.tx.last_segment = 1; - p->des01.tx.interrupt = 1; -} - -static int dwmac100_get_rx_frame_len(struct dma_desc *p) -{ - return p->des01.rx.frame_length; -} - -struct stmmac_ops dwmac100_ops = { - .core_init = dwmac100_core_init, - .dump_regs = dwmac100_dump_mac_regs, - .host_irq_status = dwmac100_irq_status, - .set_filter = dwmac100_set_filter, - .flow_ctrl = dwmac100_flow_ctrl, - .pmt = dwmac100_pmt, - .set_umac_addr = dwmac100_set_umac_addr, - .get_umac_addr = dwmac100_get_umac_addr, -}; - -struct stmmac_dma_ops dwmac100_dma_ops = { - .init = dwmac100_dma_init, - .dump_regs = dwmac100_dump_dma_regs, - .dma_mode = dwmac100_dma_operation_mode, - .dma_diagnostic_fr = 
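/*
 * Sketch of the 64-bit hash addressing described in the comment above
 * (dwmac100_set_filter): the upper six bits of the CRC pick one of 64 filter
 * bits; bit 5 selects MAC_HASH_HIGH versus MAC_HASH_LOW and bits 4..0 select
 * the bit within that 32-bit register.  Helper name is illustrative only.
 */
static void dwmac100_hash_set_bit(u32 mc_filter[2], const u8 *addr)
{
        int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;   /* 0..63 */

        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);   /* [0] = LOW, [1] = HIGH */
}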
dwmac100_dma_diagnostic_fr, - .enable_dma_transmission = dwmac_enable_dma_transmission, - .enable_dma_irq = dwmac_enable_dma_irq, - .disable_dma_irq = dwmac_disable_dma_irq, - .start_tx = dwmac_dma_start_tx, - .stop_tx = dwmac_dma_stop_tx, - .start_rx = dwmac_dma_start_rx, - .stop_rx = dwmac_dma_stop_rx, - .dma_interrupt = dwmac_dma_interrupt, -}; - -struct stmmac_desc_ops dwmac100_desc_ops = { - .tx_status = dwmac100_get_tx_frame_status, - .rx_status = dwmac100_get_rx_frame_status, - .get_tx_len = dwmac100_get_tx_len, - .init_rx_desc = dwmac100_init_rx_desc, - .init_tx_desc = dwmac100_init_tx_desc, - .get_tx_owner = dwmac100_get_tx_owner, - .get_rx_owner = dwmac100_get_rx_owner, - .release_tx_desc = dwmac100_release_tx_desc, - .prepare_tx_desc = dwmac100_prepare_tx_desc, - .clear_tx_ic = dwmac100_clear_tx_ic, - .close_tx_desc = dwmac100_close_tx_desc, - .get_tx_ls = dwmac100_get_tx_ls, - .set_tx_owner = dwmac100_set_tx_owner, - .set_rx_owner = dwmac100_set_rx_owner, - .get_rx_frame_len = dwmac100_get_rx_frame_len, -}; - -struct mac_device_info *dwmac100_setup(unsigned long ioaddr) -{ - struct mac_device_info *mac; - - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - - pr_info("\tDWMAC100\n"); - - mac->mac = &dwmac100_ops; - mac->desc = &dwmac100_desc_ops; - mac->dma = &dwmac100_dma_ops; - - mac->pmt = PMT_NOT_SUPPORTED; - mac->link.port = MAC_CONTROL_PS; - mac->link.duplex = MAC_CONTROL_F; - mac->link.speed = 0; - mac->mii.addr = MAC_MII_ADDR; - mac->mii.data = MAC_MII_DATA; - - return mac; -} diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h index 0f8f110d004..97956cbf1cb 100644 --- a/drivers/net/stmmac/dwmac100.h +++ b/drivers/net/stmmac/dwmac100.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#include <linux/phy.h> +#include "common.h" + /*---------------------------------------------------------------------------- * MAC BLOCK defines *---------------------------------------------------------------------------*/ @@ -114,3 +117,5 @@ enum ttc_control { #define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ #define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ + +extern struct stmmac_dma_ops dwmac100_dma_ops; diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h index 62dca0e384e..d8d0f355377 100644 --- a/drivers/net/stmmac/dwmac1000.h +++ b/drivers/net/stmmac/dwmac1000.h @@ -172,7 +172,6 @@ enum rfd { deac_full_minus_4 = 0x00401800, }; #define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ -#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ enum ttc_control { DMA_CONTROL_TTC_64 = 0x00000000, @@ -206,15 +205,4 @@ enum rtc_control { #define GMAC_MMC_TX_INTR 0x108 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 -#undef DWMAC1000_DEBUG -/* #define DWMAC1000__DEBUG */ -#undef FRAME_FILTER_DEBUG -/* #define FRAME_FILTER_DEBUG */ -#ifdef DWMAC1000__DEBUG -#define DBG(fmt, args...) printk(fmt, ## args) -#else -#define DBG(fmt, args...) 
do { } while (0) -#endif - extern struct stmmac_dma_ops dwmac1000_dma_ops; -extern struct stmmac_desc_ops dwmac1000_desc_ops; diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c index a6538ae4694..0aa89ae9b8e 100644 --- a/drivers/net/stmmac/dwmac1000_core.c +++ b/drivers/net/stmmac/dwmac1000_core.c @@ -27,6 +27,7 @@ *******************************************************************************/ #include <linux/crc32.h> +#include <linux/slab.h> #include "dwmac1000.h" static void dwmac1000_core_init(unsigned long ioaddr) @@ -82,8 +83,8 @@ static void dwmac1000_set_filter(struct net_device *dev) unsigned long ioaddr = dev->base_addr; unsigned int value = 0; - DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", - __func__, netdev_mc_count(dev), netdev_uc_count(dev)); + CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", + __func__, netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value = GMAC_FRAME_FILTER_PR; @@ -94,17 +95,17 @@ static void dwmac1000_set_filter(struct net_device *dev) writel(0xffffffff, ioaddr + GMAC_HASH_LOW); } else if (!netdev_mc_empty(dev)) { u32 mc_filter[2]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* Hash filter for multicast */ value = GMAC_FRAME_FILTER_HMC; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { /* The upper 6 bits of the calculated CRC are used to index the contens of the hash table */ int bit_nr = - bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; + bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit * within the register. */ @@ -135,7 +136,7 @@ static void dwmac1000_set_filter(struct net_device *dev) #endif writel(value, ioaddr + GMAC_FRAME_FILTER); - DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " + CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); @@ -147,18 +148,18 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex, { unsigned int flow = 0; - DBG(KERN_DEBUG "GMAC Flow-Control:\n"); + CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n"); if (fc & FLOW_RX) { - DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); + CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_RFE; } if (fc & FLOW_TX) { - DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); + CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_TFE; } if (duplex) { - DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); + CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time); flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); } @@ -171,10 +172,10 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode) unsigned int pmt = 0; if (mode == WAKE_MAGIC) { - DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); + CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); pmt |= power_down | magic_pkt_en; } else if (mode == WAKE_UCAST) { - DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); + CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); pmt |= global_unicast; } @@ -189,16 +190,16 @@ static void dwmac1000_irq_status(unsigned long ioaddr) /* Not used events (e.g. MMC interrupts) are not handled. 
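/*
 * Sketch of the GMAC hash-bit computation used by dwmac1000_set_filter()
 * above: the little-endian CRC of the address is seeded with all ones,
 * complemented and bit-reversed, and its upper six bits index the 64-entry
 * hash table split across GMAC_HASH_HIGH/GMAC_HASH_LOW.  Assumes
 * <linux/crc32.h> and <linux/bitrev.h> are available, as in this driver.
 */
static int dwmac1000_hash_bit(const u8 *addr)
{
        return bitrev32(~crc32_le(~0, addr, 6)) >> 26;  /* 0..63 */
}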
*/ if ((intr_status & mmc_tx_irq)) - DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", + CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_TX_INTR)); if (unlikely(intr_status & mmc_rx_irq)) - DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", + CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_INTR)); if (unlikely(intr_status & mmc_rx_csum_offload_irq)) - DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", + CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); if (unlikely(intr_status & pmt_irq)) { - DBG(KERN_DEBUG "GMAC: received Magic frame\n"); + CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT * status register. */ readl(ioaddr + GMAC_PMT); @@ -229,7 +230,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr) mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); mac->mac = &dwmac1000_ops; - mac->desc = &dwmac1000_desc_ops; mac->dma = &dwmac1000_dma_ops; mac->pmt = PMT_SUPPORTED; diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c index 39d436a2da6..a547aa99e11 100644 --- a/drivers/net/stmmac/dwmac1000_dma.c +++ b/drivers/net/stmmac/dwmac1000_dma.c @@ -3,7 +3,7 @@ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for developing this code. - This contains the functions to handle the dma and descriptors. + This contains the functions to handle the dma. Copyright (C) 2007-2009 STMicroelectronics Ltd @@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, return 0; } -/* Transmit FIFO flush operation */ -static void dwmac1000_flush_tx_fifo(unsigned long ioaddr) -{ - u32 csr6 = readl(ioaddr + DMA_CONTROL); - writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); - - do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); -} - static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, int rxmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); if (txmode == SF_DMA_MODE) { - DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); + CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ csr6 |= DMA_CONTROL_TSF; /* Operating on second frame increase the performance * especially when transmit store-and-forward is used.*/ csr6 |= DMA_CONTROL_OSF; } else { - DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" + CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" " (threshold = %d)\n", txmode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; @@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, } if (rxmode == SF_DMA_MODE) { - DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); + CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); csr6 |= DMA_CONTROL_RSF; } else { - DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" + CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" " (threshold = %d)\n", rxmode); csr6 &= ~DMA_CONTROL_RSF; csr6 &= DMA_CONTROL_TC_RX_MASK; @@ -141,305 +132,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr) return; } -static int dwmac1000_get_tx_frame_status(void *data, - struct stmmac_extra_stats *x, - struct dma_desc *p, unsigned long ioaddr) -{ - int ret = 0; - struct net_device_stats *stats = (struct net_device_stats *)data; - - if (unlikely(p->des01.etx.error_summary)) { - DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx); - if (unlikely(p->des01.etx.jabber_timeout)) { - DBG(KERN_ERR "\tjabber_timeout error\n"); - x->tx_jabber++; - } - - if (unlikely(p->des01.etx.frame_flushed)) { - DBG(KERN_ERR "\tframe_flushed error\n"); - x->tx_frame_flushed++; - dwmac1000_flush_tx_fifo(ioaddr); - } - - if (unlikely(p->des01.etx.loss_carrier)) { - DBG(KERN_ERR "\tloss_carrier error\n"); - x->tx_losscarrier++; - stats->tx_carrier_errors++; - } - if (unlikely(p->des01.etx.no_carrier)) { - DBG(KERN_ERR "\tno_carrier error\n"); - x->tx_carrier++; - stats->tx_carrier_errors++; - } - if (unlikely(p->des01.etx.late_collision)) { - DBG(KERN_ERR "\tlate_collision error\n"); - stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_collisions)) { - DBG(KERN_ERR "\texcessive_collisions\n"); - stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_deferral)) { - DBG(KERN_INFO "\texcessive tx_deferral\n"); - x->tx_deferred++; - } - - if (unlikely(p->des01.etx.underflow_error)) { - DBG(KERN_ERR "\tunderflow error\n"); - dwmac1000_flush_tx_fifo(ioaddr); - x->tx_underflow++; - } - - if (unlikely(p->des01.etx.ip_header_error)) { - DBG(KERN_ERR "\tTX IP header csum error\n"); - x->tx_ip_header_error++; - } - - if (unlikely(p->des01.etx.payload_error)) { - DBG(KERN_ERR "\tAddr/Payload csum error\n"); - x->tx_payload_error++; - dwmac1000_flush_tx_fifo(ioaddr); - } - - ret = -1; - } - - if (unlikely(p->des01.etx.deferred)) { - DBG(KERN_INFO "GMAC TX status: tx deferred\n"); - x->tx_deferred++; - } -#ifdef STMMAC_VLAN_TAG_USED - if (p->des01.etx.vlan_frame) { - DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); - x->tx_vlan++; - } -#endif - - return ret; -} - -static int dwmac1000_get_tx_len(struct dma_desc *p) -{ - return p->des01.etx.buffer1_size; -} - -static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err) -{ - int ret = good_frame; - u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; - - /* bits 5 7 0 | Frame status - * ---------------------------------------------------------- - * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) - * 1 0 0 | IPv4/6 No CSUM errorS. 
- * 1 0 1 | IPv4/6 CSUM PAYLOAD error - * 1 1 0 | IPv4/6 CSUM IP HR error - * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS - * 0 0 1 | IPv4/6 unsupported IP PAYLOAD - * 0 1 1 | COE bypassed.. no IPv4/6 frame - * 0 1 0 | Reserved. - */ - if (status == 0x0) { - DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); - ret = good_frame; - } else if (status == 0x4) { - DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); - ret = good_frame; - } else if (status == 0x5) { - DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); - ret = csum_none; - } else if (status == 0x6) { - DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); - ret = csum_none; - } else if (status == 0x7) { - DBG(KERN_ERR - "RX Des0 status: IPv4/6 Header and Payload Error.\n"); - ret = csum_none; - } else if (status == 0x1) { - DBG(KERN_ERR - "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); - ret = discard_frame; - } else if (status == 0x3) { - DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); - ret = discard_frame; - } - return ret; -} - -static int dwmac1000_get_rx_frame_status(void *data, - struct stmmac_extra_stats *x, struct dma_desc *p) -{ - int ret = good_frame; - struct net_device_stats *stats = (struct net_device_stats *)data; - - if (unlikely(p->des01.erx.error_summary)) { - DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx); - if (unlikely(p->des01.erx.descriptor_error)) { - DBG(KERN_ERR "\tdescriptor error\n"); - x->rx_desc++; - stats->rx_length_errors++; - } - if (unlikely(p->des01.erx.overflow_error)) { - DBG(KERN_ERR "\toverflow error\n"); - x->rx_gmac_overflow++; - } - - if (unlikely(p->des01.erx.ipc_csum_error)) - DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); - - if (unlikely(p->des01.erx.late_collision)) { - DBG(KERN_ERR "\tlate_collision error\n"); - stats->collisions++; - stats->collisions++; - } - if (unlikely(p->des01.erx.receive_watchdog)) { - DBG(KERN_ERR "\treceive_watchdog error\n"); - x->rx_watchdog++; - } - if (unlikely(p->des01.erx.error_gmii)) { - DBG(KERN_ERR "\tReceive Error\n"); - x->rx_mii++; - } - if (unlikely(p->des01.erx.crc_error)) { - DBG(KERN_ERR "\tCRC error\n"); - x->rx_crc++; - stats->rx_crc_errors++; - } - ret = discard_frame; - } - - /* After a payload csum error, the ES bit is set. - * It doesn't match with the information reported into the databook. - * At any rate, we need to understand if the CSUM hw computation is ok - * and report this info to the upper layers. 
*/ - ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error, - p->des01.erx.frame_type, p->des01.erx.payload_csum_error); - - if (unlikely(p->des01.erx.dribbling)) { - DBG(KERN_ERR "GMAC RX: dribbling error\n"); - ret = discard_frame; - } - if (unlikely(p->des01.erx.sa_filter_fail)) { - DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); - x->sa_rx_filter_fail++; - ret = discard_frame; - } - if (unlikely(p->des01.erx.da_filter_fail)) { - DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n"); - x->da_rx_filter_fail++; - ret = discard_frame; - } - if (unlikely(p->des01.erx.length_error)) { - DBG(KERN_ERR "GMAC RX: length_error error\n"); - x->rx_length++; - ret = discard_frame; - } -#ifdef STMMAC_VLAN_TAG_USED - if (p->des01.erx.vlan_tag) { - DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); - x->rx_vlan++; - } -#endif - return ret; -} - -static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size, - int disable_rx_ic) -{ - int i; - for (i = 0; i < ring_size; i++) { - p->des01.erx.own = 1; - p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; - /* To support jumbo frames */ - p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; - if (i == ring_size - 1) - p->des01.erx.end_ring = 1; - if (disable_rx_ic) - p->des01.erx.disable_ic = 1; - p++; - } - return; -} - -static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size) -{ - int i; - - for (i = 0; i < ring_size; i++) { - p->des01.etx.own = 0; - if (i == ring_size - 1) - p->des01.etx.end_ring = 1; - p++; - } - - return; -} - -static int dwmac1000_get_tx_owner(struct dma_desc *p) -{ - return p->des01.etx.own; -} - -static int dwmac1000_get_rx_owner(struct dma_desc *p) -{ - return p->des01.erx.own; -} - -static void dwmac1000_set_tx_owner(struct dma_desc *p) -{ - p->des01.etx.own = 1; -} - -static void dwmac1000_set_rx_owner(struct dma_desc *p) -{ - p->des01.erx.own = 1; -} - -static int dwmac1000_get_tx_ls(struct dma_desc *p) -{ - return p->des01.etx.last_segment; -} - -static void dwmac1000_release_tx_desc(struct dma_desc *p) -{ - int ter = p->des01.etx.end_ring; - - memset(p, 0, sizeof(struct dma_desc)); - p->des01.etx.end_ring = ter; - - return; -} - -static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag) -{ - p->des01.etx.first_segment = is_fs; - if (unlikely(len > BUF_SIZE_4KiB)) { - p->des01.etx.buffer1_size = BUF_SIZE_4KiB; - p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; - } else { - p->des01.etx.buffer1_size = len; - } - if (likely(csum_flag)) - p->des01.etx.checksum_insertion = cic_full; -} - -static void dwmac1000_clear_tx_ic(struct dma_desc *p) -{ - p->des01.etx.interrupt = 0; -} - -static void dwmac1000_close_tx_desc(struct dma_desc *p) -{ - p->des01.etx.last_segment = 1; - p->des01.etx.interrupt = 1; -} - -static int dwmac1000_get_rx_frame_len(struct dma_desc *p) -{ - return p->des01.erx.frame_length; -} - struct stmmac_dma_ops dwmac1000_dma_ops = { .init = dwmac1000_dma_init, .dump_regs = dwmac1000_dump_dma_regs, @@ -454,21 +146,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = { .stop_rx = dwmac_dma_stop_rx, .dma_interrupt = dwmac_dma_interrupt, }; - -struct stmmac_desc_ops dwmac1000_desc_ops = { - .tx_status = dwmac1000_get_tx_frame_status, - .rx_status = dwmac1000_get_rx_frame_status, - .get_tx_len = dwmac1000_get_tx_len, - .init_rx_desc = dwmac1000_init_rx_desc, - .init_tx_desc = dwmac1000_init_tx_desc, - .get_tx_owner = dwmac1000_get_tx_owner, - .get_rx_owner = dwmac1000_get_rx_owner, - .release_tx_desc = dwmac1000_release_tx_desc, - .prepare_tx_desc = 
dwmac1000_prepare_tx_desc, - .clear_tx_ic = dwmac1000_clear_tx_ic, - .close_tx_desc = dwmac1000_close_tx_desc, - .get_tx_ls = dwmac1000_get_tx_ls, - .set_tx_owner = dwmac1000_set_tx_owner, - .set_rx_owner = dwmac1000_set_rx_owner, - .get_rx_frame_len = dwmac1000_get_rx_frame_len, -}; diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c new file mode 100644 index 00000000000..fab14a4cb14 --- /dev/null +++ b/drivers/net/stmmac/dwmac100_core.c @@ -0,0 +1,201 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This only implements the mac core functions for this chip. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/crc32.h> +#include "dwmac100.h" + +static void dwmac100_core_init(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CONTROL); + + writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif + return; +} + +static void dwmac100_dump_mac_regs(unsigned long ioaddr) +{ + pr_info("\t----------------------------------------------\n" + "\t DWMAC 100 CSR (base addr = 0x%8x)\n" + "\t----------------------------------------------\n", + (unsigned int)ioaddr); + pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, + readl(ioaddr + MAC_CONTROL)); + pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, + readl(ioaddr + MAC_ADDR_HIGH)); + pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, + readl(ioaddr + MAC_ADDR_LOW)); + pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", + MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); + pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", + MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); + pr_info("\tflow control (offset 0x%x): 0x%08x\n", + MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); + pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, + readl(ioaddr + MAC_VLAN1)); + pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, + readl(ioaddr + MAC_VLAN2)); + pr_info("\n\tMAC management counter registers\n"); + pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", + MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); + pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); + pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); + pr_info("\t MMC High Interrupt Mask 
(offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); + pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); + return; +} + +static void dwmac100_irq_status(unsigned long ioaddr) +{ + return; +} + +static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_set_filter(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + MAC_HASH_LOW); + } else if (netdev_mc_empty(dev)) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Perfect filter mode for physical address and Hash + filter for multicast */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | + MAC_CONTROL_IF | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contens of the hash table */ + int bit_nr = + ether_crc(ETH_ALEN, ha->addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); + + CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " + "HI 0x%08x, LO 0x%08x\n", + __func__, readl(ioaddr + MAC_CONTROL), + readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); + return; +} + +static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); + + return; +} + +/* No PMT module supported for this Ethernet Controller. + * Tested on ST platforms only. 
+ */ +static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode) +{ + return; +} + +struct stmmac_ops dwmac100_ops = { + .core_init = dwmac100_core_init, + .dump_regs = dwmac100_dump_mac_regs, + .host_irq_status = dwmac100_irq_status, + .set_filter = dwmac100_set_filter, + .flow_ctrl = dwmac100_flow_ctrl, + .pmt = dwmac100_pmt, + .set_umac_addr = dwmac100_set_umac_addr, + .get_umac_addr = dwmac100_get_umac_addr, +}; + +struct mac_device_info *dwmac100_setup(unsigned long ioaddr) +{ + struct mac_device_info *mac; + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + + pr_info("\tDWMAC100\n"); + + mac->mac = &dwmac100_ops; + mac->dma = &dwmac100_dma_ops; + + mac->pmt = PMT_NOT_SUPPORTED; + mac->link.port = MAC_CONTROL_PS; + mac->link.duplex = MAC_CONTROL_F; + mac->link.speed = 0; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; + + return mac; +} diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c new file mode 100644 index 00000000000..96d098d68ad --- /dev/null +++ b/drivers/net/stmmac/dwmac100_dma.c @@ -0,0 +1,138 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include "dwmac100.h" +#include "dwmac_dma.h" + +static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, + u32 dma_rx) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); + + /* Enable Application Access by writing to DMA CSR0 */ + writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* The base address of the RX/TX descriptor lists must be written into + * DMA CSR3 and CSR4, respectively. */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Store and Forward capability is not used at all.. 
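/*
 * Defensive sketch (hypothetical, not part of this patch): dwmac100_setup()
 * above dereferences the kzalloc() result without checking it.  A checked
 * variant, assuming callers can cope with a NULL return, would begin like
 * this:
 */
static struct mac_device_info *dwmac100_setup_checked(unsigned long ioaddr)
{
        struct mac_device_info *mac = kzalloc(sizeof(*mac), GFP_KERNEL);

        if (!mac)
                return NULL;            /* let the caller bail out cleanly */

        mac->mac = &dwmac100_ops;
        mac->dma = &dwmac100_dma_ops;
        return mac;
}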
+ * The transmit threshold can be programmed by + * setting the TTC bits in the DMA control register.*/ +static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode, + int rxmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); + + return; +} + +static void dwmac100_dump_dma_regs(unsigned long ioaddr) +{ + int i; + + CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); + for (i = 0; i < 9; i++) + pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + i * 4), + readl(ioaddr + DMA_BUS_MODE + i * 4)); + CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); + CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", + DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); + return; +} + +/* DMA controller has two counters to track the number of + * the receive missed frames. */ +static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } + return; +} + +struct stmmac_dma_ops dwmac100_dma_ops = { + .init = dwmac100_dma_init, + .dump_regs = dwmac100_dump_dma_regs, + .dma_mode = dwmac100_dma_operation_mode, + .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, +}; diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h index de848d9f606..7b815a1b7b8 100644 --- a/drivers/net/stmmac/dwmac_dma.h +++ b/drivers/net/stmmac/dwmac_dma.h @@ -95,6 +95,7 @@ #define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ #define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ extern void dwmac_enable_dma_transmission(unsigned long ioaddr); extern void dwmac_enable_dma_irq(unsigned long ioaddr); diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c index d4adb1eaa44..0a504adb7eb 100644 --- a/drivers/net/stmmac/dwmac_lib.c +++ b/drivers/net/stmmac/dwmac_lib.c @@ -227,6 +227,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr, return ret; } +void dwmac_dma_flush_tx_fifo(unsigned long ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], unsigned int high, unsigned int low) diff --git 
a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c new file mode 100644 index 00000000000..eb5684a1f71 --- /dev/null +++ b/drivers/net/stmmac/enh_desc.c @@ -0,0 +1,342 @@ +/******************************************************************************* + This contains the functions to handle the enhanced descriptors. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include "common.h" + +static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.etx.error_summary)) { + CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx); + if (unlikely(p->des01.etx.jabber_timeout)) { + CHIP_DBG(KERN_ERR "\tjabber_timeout error\n"); + x->tx_jabber++; + } + + if (unlikely(p->des01.etx.frame_flushed)) { + CHIP_DBG(KERN_ERR "\tframe_flushed error\n"); + x->tx_frame_flushed++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + if (unlikely(p->des01.etx.loss_carrier)) { + CHIP_DBG(KERN_ERR "\tloss_carrier error\n"); + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.no_carrier)) { + CHIP_DBG(KERN_ERR "\tno_carrier error\n"); + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.late_collision)) { + CHIP_DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_collisions)) { + CHIP_DBG(KERN_ERR "\texcessive_collisions\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_deferral)) { + CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n"); + x->tx_deferred++; + } + + if (unlikely(p->des01.etx.underflow_error)) { + CHIP_DBG(KERN_ERR "\tunderflow error\n"); + dwmac_dma_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(p->des01.etx.ip_header_error)) { + CHIP_DBG(KERN_ERR "\tTX IP header csum error\n"); + x->tx_ip_header_error++; + } + + if (unlikely(p->des01.etx.payload_error)) { + CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n"); + x->tx_payload_error++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + ret = -1; + } + + if (unlikely(p->des01.etx.deferred)) { + CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n"); + x->tx_deferred++; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.etx.vlan_frame) { + CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + x->tx_vlan++; + } +#endif + + return ret; +} + +static int enh_desc_get_tx_len(struct dma_desc *p) +{ + return p->des01.etx.buffer1_size; +} + +static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) +{ 
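/*
 * Usage sketch for dwmac_dma_flush_tx_fifo(), which this series moves into
 * dwmac_lib.c: the enhanced-descriptor TX status path above calls it
 * whenever the frame-flushed, underflow or payload-checksum error bits are
 * set, so stale data is purged before the next transmission.  The wrapper
 * below is illustrative only.
 */
static void enh_desc_tx_underflow(struct stmmac_extra_stats *x,
                                  unsigned long ioaddr)
{
        x->tx_underflow++;                      /* as in the underflow branch above */
        dwmac_dma_flush_tx_fifo(ioaddr);        /* drop the partially sent frame */
}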
+ int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. + */ + if (status == 0x0) { + CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); + ret = good_frame; + } else if (status == 0x4) { + CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); + ret = good_frame; + } else if (status == 0x5) { + CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); + ret = csum_none; + } else if (status == 0x6) { + CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); + ret = csum_none; + } else if (status == 0x7) { + CHIP_DBG(KERN_ERR + "RX Des0 status: IPv4/6 Header and Payload Error.\n"); + ret = csum_none; + } else if (status == 0x1) { + CHIP_DBG(KERN_ERR + "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); + ret = discard_frame; + } else if (status == 0x3) { + CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); + ret = discard_frame; + } + return ret; +} + +static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.erx.error_summary)) { + CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n", + p->des01.erx); + if (unlikely(p->des01.erx.descriptor_error)) { + CHIP_DBG(KERN_ERR "\tdescriptor error\n"); + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(p->des01.erx.overflow_error)) { + CHIP_DBG(KERN_ERR "\toverflow error\n"); + x->rx_gmac_overflow++; + } + + if (unlikely(p->des01.erx.ipc_csum_error)) + CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); + + if (unlikely(p->des01.erx.late_collision)) { + CHIP_DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions++; + stats->collisions++; + } + if (unlikely(p->des01.erx.receive_watchdog)) { + CHIP_DBG(KERN_ERR "\treceive_watchdog error\n"); + x->rx_watchdog++; + } + if (unlikely(p->des01.erx.error_gmii)) { + CHIP_DBG(KERN_ERR "\tReceive Error\n"); + x->rx_mii++; + } + if (unlikely(p->des01.erx.crc_error)) { + CHIP_DBG(KERN_ERR "\tCRC error\n"); + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. + * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. 
*/ + ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, + p->des01.erx.frame_type, p->des01.erx.payload_csum_error); + + if (unlikely(p->des01.erx.dribbling)) { + CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); + ret = discard_frame; + } + if (unlikely(p->des01.erx.sa_filter_fail)) { + CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.da_filter_fail)) { + CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n"); + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.length_error)) { + CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n"); + x->rx_length++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.erx.vlan_tag) { + CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); + x->rx_vlan++; + } +#endif + return ret; +} + +static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + /* To support jumbo frames */ + p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; + if (i == ring_size - 1) + p->des01.erx.end_ring = 1; + if (disable_rx_ic) + p->des01.erx.disable_ic = 1; + p++; + } + return; +} + +static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + + for (i = 0; i < ring_size; i++) { + p->des01.etx.own = 0; + if (i == ring_size - 1) + p->des01.etx.end_ring = 1; + p++; + } + + return; +} + +static int enh_desc_get_tx_owner(struct dma_desc *p) +{ + return p->des01.etx.own; +} + +static int enh_desc_get_rx_owner(struct dma_desc *p) +{ + return p->des01.erx.own; +} + +static void enh_desc_set_tx_owner(struct dma_desc *p) +{ + p->des01.etx.own = 1; +} + +static void enh_desc_set_rx_owner(struct dma_desc *p) +{ + p->des01.erx.own = 1; +} + +static int enh_desc_get_tx_ls(struct dma_desc *p) +{ + return p->des01.etx.last_segment; +} + +static void enh_desc_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.etx.end_ring; + + memset(p, 0, sizeof(struct dma_desc)); + p->des01.etx.end_ring = ter; + + return; +} + +static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.etx.first_segment = is_fs; + if (unlikely(len > BUF_SIZE_4KiB)) { + p->des01.etx.buffer1_size = BUF_SIZE_4KiB; + p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + } else { + p->des01.etx.buffer1_size = len; + } + if (likely(csum_flag)) + p->des01.etx.checksum_insertion = cic_full; +} + +static void enh_desc_clear_tx_ic(struct dma_desc *p) +{ + p->des01.etx.interrupt = 0; +} + +static void enh_desc_close_tx_desc(struct dma_desc *p) +{ + p->des01.etx.last_segment = 1; + p->des01.etx.interrupt = 1; +} + +static int enh_desc_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.erx.frame_length; +} + +struct stmmac_desc_ops enh_desc_ops = { + .tx_status = enh_desc_get_tx_status, + .rx_status = enh_desc_get_rx_status, + .get_tx_len = enh_desc_get_tx_len, + .init_rx_desc = enh_desc_init_rx_desc, + .init_tx_desc = enh_desc_init_tx_desc, + .get_tx_owner = enh_desc_get_tx_owner, + .get_rx_owner = enh_desc_get_rx_owner, + .release_tx_desc = enh_desc_release_tx_desc, + .prepare_tx_desc = enh_desc_prepare_tx_desc, + .clear_tx_ic = enh_desc_clear_tx_ic, + .close_tx_desc = enh_desc_close_tx_desc, + .get_tx_ls = enh_desc_get_tx_ls, + .set_tx_owner = enh_desc_set_tx_owner, + .set_rx_owner = enh_desc_set_rx_owner, + .get_rx_frame_len = 
enh_desc_get_rx_frame_len, +}; diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c new file mode 100644 index 00000000000..ecfcc001a04 --- /dev/null +++ b/drivers/net/stmmac/norm_desc.c @@ -0,0 +1,240 @@ +/******************************************************************************* + This contains the functions to handle the normal descriptors. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include "common.h" + +static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.tx.error_summary)) { + if (unlikely(p->des01.tx.underflow_error)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(p->des01.tx.no_carrier)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.tx.loss_carrier)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((p->des01.tx.excessive_deferral) || + (p->des01.tx.excessive_collisions) || + (p->des01.tx.late_collision))) + stats->collisions += p->des01.tx.collision_count; + ret = -1; + } + if (unlikely(p->des01.tx.heartbeat_fail)) { + x->tx_heartbeat++; + stats->tx_heartbeat_errors++; + ret = -1; + } + if (unlikely(p->des01.tx.deferred)) + x->tx_deferred++; + + return ret; +} + +static int ndesc_get_tx_len(struct dma_desc *p) +{ + return p->des01.tx.buffer1_size; +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns csum_none becasue the device + * is not able to compute the csum in HW. 
*/ +static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = csum_none; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.rx.last_descriptor == 0)) { + pr_warning("ndesc Error: Oversized Ethernet " + "frame spanned multiple buffers\n"); + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(p->des01.rx.error_summary)) { + if (unlikely(p->des01.rx.descriptor_error)) + x->rx_desc++; + if (unlikely(p->des01.rx.partial_frame_error)) + x->rx_partial++; + if (unlikely(p->des01.rx.run_frame)) + x->rx_runt++; + if (unlikely(p->des01.rx.frame_too_long)) + x->rx_toolong++; + if (unlikely(p->des01.rx.collision)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(p->des01.rx.crc_error)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(p->des01.rx.dribbling)) + ret = discard_frame; + + if (unlikely(p->des01.rx.length_error)) { + x->rx_length++; + ret = discard_frame; + } + if (unlikely(p->des01.rx.mii_error)) { + x->rx_mii++; + ret = discard_frame; + } + if (p->des01.rx.multicast_frame) { + x->rx_multicast++; + stats->multicast++; + } + return ret; +} + +static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + if (i == ring_size - 1) + p->des01.rx.end_ring = 1; + if (disable_rx_ic) + p->des01.rx.disable_ic = 1; + p++; + } + return; +} + +static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.tx.own = 0; + if (i == ring_size - 1) + p->des01.tx.end_ring = 1; + p++; + } + return; +} + +static int ndesc_get_tx_owner(struct dma_desc *p) +{ + return p->des01.tx.own; +} + +static int ndesc_get_rx_owner(struct dma_desc *p) +{ + return p->des01.rx.own; +} + +static void ndesc_set_tx_owner(struct dma_desc *p) +{ + p->des01.tx.own = 1; +} + +static void ndesc_set_rx_owner(struct dma_desc *p) +{ + p->des01.rx.own = 1; +} + +static int ndesc_get_tx_ls(struct dma_desc *p) +{ + return p->des01.tx.last_segment; +} + +static void ndesc_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.tx.end_ring; + + /* clean field used within the xmit */ + p->des01.tx.first_segment = 0; + p->des01.tx.last_segment = 0; + p->des01.tx.buffer1_size = 0; + + /* clean status reported */ + p->des01.tx.error_summary = 0; + p->des01.tx.underflow_error = 0; + p->des01.tx.no_carrier = 0; + p->des01.tx.loss_carrier = 0; + p->des01.tx.excessive_deferral = 0; + p->des01.tx.excessive_collisions = 0; + p->des01.tx.late_collision = 0; + p->des01.tx.heartbeat_fail = 0; + p->des01.tx.deferred = 0; + + /* set termination field */ + p->des01.tx.end_ring = ter; + + return; +} + +static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.tx.first_segment = is_fs; + p->des01.tx.buffer1_size = len; +} + +static void ndesc_clear_tx_ic(struct dma_desc *p) +{ + p->des01.tx.interrupt = 0; +} + +static void ndesc_close_tx_desc(struct dma_desc *p) +{ + p->des01.tx.last_segment = 1; + p->des01.tx.interrupt = 1; +} + +static int ndesc_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.rx.frame_length; +} + +struct stmmac_desc_ops ndesc_ops = { + .tx_status = ndesc_get_tx_status, + .rx_status = ndesc_get_rx_status, + .get_tx_len = ndesc_get_tx_len, + .init_rx_desc = ndesc_init_rx_desc, + .init_tx_desc = 
ndesc_init_tx_desc, + .get_tx_owner = ndesc_get_tx_owner, + .get_rx_owner = ndesc_get_rx_owner, + .release_tx_desc = ndesc_release_tx_desc, + .prepare_tx_desc = ndesc_prepare_tx_desc, + .clear_tx_ic = ndesc_clear_tx_ic, + .close_tx_desc = ndesc_close_tx_desc, + .get_tx_ls = ndesc_get_tx_ls, + .set_tx_owner = ndesc_set_tx_owner, + .set_rx_owner = ndesc_set_rx_owner, + .get_rx_frame_len = ndesc_get_rx_frame_len, +}; diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h index ba35e6943cf..ebebc644b1b 100644 --- a/drivers/net/stmmac/stmmac.h +++ b/drivers/net/stmmac/stmmac.h @@ -20,14 +20,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#define DRV_MODULE_VERSION "Jan_2010" +#define DRV_MODULE_VERSION "Apr_2010" #include <linux/stmmac.h> -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -#define STMMAC_VLAN_TAG_USED -#include <linux/if_vlan.h> -#endif - #include "common.h" #ifdef CONFIG_STMMAC_TIMER #include "stmmac_timer.h" @@ -93,6 +88,7 @@ struct stmmac_priv { #ifdef STMMAC_VLAN_TAG_USED struct vlan_group *vlgrp; #endif + int enh_desc; }; #ifdef CONFIG_STM_DRIVERS @@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev) extern int stmmac_mdio_unregister(struct net_device *ndev); extern int stmmac_mdio_register(struct net_device *ndev); extern void stmmac_set_ethtool_ops(struct net_device *netdev); +extern struct stmmac_desc_ops enh_desc_ops; +extern struct stmmac_desc_ops ndesc_ops; diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index a6733612d64..7ac6ddea989 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -44,6 +44,7 @@ #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include "stmmac.h" #define STMMAC_RESOURCE_NAME "stmmaceth" @@ -836,7 +837,7 @@ static int stmmac_open(struct net_device *dev) #ifdef CONFIG_STMMAC_TIMER priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); if (unlikely(priv->tm == NULL)) { - pr_err("%s: ERROR: timer memory alloc failed \n", __func__); + pr_err("%s: ERROR: timer memory alloc failed\n", __func__); return -ENOMEM; } priv->tm->freq = tmrate; @@ -1279,7 +1280,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; - priv->dev->last_rx = jiffies; } entry = next_entry; p = p_next; /* use prefetched values */ @@ -1586,6 +1586,12 @@ static int stmmac_mac_device_setup(struct net_device *dev) else device = dwmac100_setup(ioaddr); + if (priv->enh_desc) { + device->desc = &enh_desc_ops; + pr_info("\tEnhanced descriptor structure\n"); + } else + device->desc = &ndesc_ops; + if (!device) return -ENOMEM; @@ -1685,7 +1691,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) } pr_info("done!\n"); - if (!request_mem_region(res->start, (res->end - res->start), + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", @@ -1694,9 +1700,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev) goto out; } - addr = ioremap(res->start, (res->end - res->start)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { - pr_err("%s: ERROR: memory mapping failed \n", __func__); + pr_err("%s: ERROR: memory mapping failed\n", __func__); ret = -ENOMEM; goto out; } @@ -1726,6 +1732,7 @@ static int 
stmmac_dvr_probe(struct platform_device *pdev) priv->bus_id = plat_dat->bus_id; priv->pbl = plat_dat->pbl; /* TLI */ priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ + priv->enh_desc = plat_dat->enh_desc; platform_set_drvdata(pdev, ndev); @@ -1774,7 +1781,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) out: if (ret < 0) { platform_set_drvdata(pdev, NULL); - release_mem_region(res->start, (res->end - res->start)); + release_mem_region(res->start, resource_size(res)); if (addr != NULL) iounmap(addr); } @@ -1812,7 +1819,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) iounmap((void *)ndev->base_addr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, (res->end - res->start)); + release_mem_region(res->start, resource_size(res)); free_netdev(ndev); diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c index fffe1d037fe..40b2c792971 100644 --- a/drivers/net/stmmac/stmmac_mdio.c +++ b/drivers/net/stmmac/stmmac_mdio.c @@ -26,6 +26,7 @@ #include <linux/mii.h> #include <linux/phy.h> +#include <linux/slab.h> #include "stmmac.h" diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 2f6a760e5f2..31ab4ab0796 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -33,7 +33,6 @@ static int fifo=0x8; /* don't change */ #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> @@ -413,7 +412,7 @@ static int init586(struct net_device *dev) volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs=netdev_mc_count(dev); ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); @@ -537,9 +536,9 @@ static int init586(struct net_device *dev) mc_cmd->mc_cnt = swab16(num_addrs * 6); i = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) memcpy((char *) mc_cmd->mc_list[i++], - dmi->dmi_addr, ETH_ALEN); + ha->addr, ETH_ALEN); p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd_cuc = CUC_START; diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 99998862c22..1694ca5bfb4 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -28,7 +28,6 @@ static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.ne #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index a0bd361d5ec..52913155ce4 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -11,7 +11,6 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> @@ -25,6 +24,7 @@ #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/gfp.h> #include <asm/auxio.h> #include <asm/byteorder.h> @@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); void __iomem *bregs = bp->bregs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i; u32 tmp, crc; @@ -1028,8 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - 
netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index a855934dfc3..da45f01279c 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -84,7 +84,6 @@ static char *media[MAX_UNITS]; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> @@ -1523,13 +1522,13 @@ static void set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int bit; int index; int crc; memset (mc_filter, 0, sizeof (mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); for (index=0, bit=0; bit < 6; bit++, crc <<= 1) if (crc & 0x80000000) index |= 1 << bit; mc_filter[index/16] |= (1 << (index % 16)); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 4344017bfae..2b78e97ea9c 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -39,7 +39,6 @@ #include <linux/ioport.h> #include <linux/in.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> @@ -58,6 +57,7 @@ #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/mm.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> @@ -782,7 +782,7 @@ static int gem_rx(struct gem *gp, int work_to_do) break; /* When writing back RX descriptor, GEM writes status - * then buffer address, possibly in seperate transactions. + * then buffer address, possibly in separate transactions. * If we don't wait for the chip to write both, we could * post a new buffer to this descriptor then have GEM spam * on the buffer address. 
We sync on the RX completion @@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp) } else { u16 hash_table[16]; u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, gp->dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, gp->dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index b17dbb11bd6..20deb14e98f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp) hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); } else if ((hp->dev->flags & IFF_PROMISC) == 0) { u16 hash_table[4]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, hp->dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, hp->dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; @@ -2362,7 +2362,7 @@ static void happy_meal_set_multicast(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); void __iomem *bregs = hp->bigmacregs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -2380,8 +2380,8 @@ static void happy_meal_set_multicast(struct net_device *dev) u16 hash_table[4]; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index d7c73f478ef..c7748b73fa0 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -78,7 +78,6 @@ static char lancestr[] = "LANCE"; #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> @@ -94,6 +93,7 @@ static char lancestr[] = "LANCE"; #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> @@ -1170,7 +1170,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; u32 val; @@ -1195,8 +1195,8 @@ static void lance_load_multicast(struct net_device *dev) return; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index be637dce944..239f0977219 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; char *addrs; int i; @@ -651,8 +651,8 @@ static void qe_set_multicast(struct net_device *dev) u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index 6b1b7cea7f6..6cf8b06be5c 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -763,12 +763,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) static void __update_mc_list(struct vnet *vp, struct net_device *dev) { - struct dev_addr_list *p; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(p, dev) { + netdev_for_each_mc_addr(ha, dev) { struct vnet_mcast_entry *m; - m = __vnet_mc_find(vp, p->dmi_addr); + m = __vnet_mc_find(vp, ha->addr); if (m) { m->hit = 1; continue; @@ -778,7 +778,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev) m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) continue; - memcpy(m->addr, p->dmi_addr, ETH_ALEN); + memcpy(m->addr, ha->addr, ETH_ALEN); m->hit = 1; m->next = vp->mcast_list; diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 49bd84c0d58..36149ddace4 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -1954,16 +1954,16 @@ tc35815_set_multicast_list(struct net_device *dev) /* Disable promiscuous mode, use normal mode. */ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; int i; int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); tc_writel(0, &tr->CAM_Ctl); /* Walk the address list, and load the filter */ i = 0; - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { /* entry 0,1 is reserved. */ - tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr); + tc35815_set_cam_entry(dev, i + 2, ha->addr); ena_bits |= CAM_Ena_Bit(i + 2); i++; } diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 0c9780217c8..e29f495c6a2 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev) WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); } else if (!netdev_mc_empty(ndev)) { u8 hash; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 reg, val; /* set IMF to deny all multicast frames */ @@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev) * into RX_MAC_MCST regs. 
we skip this phase now and accept ALL * multicast frames throu IMF */ /* accept the rest of addresses throu IMF */ - netdev_for_each_mc_addr(mclist, ndev) { + netdev_for_each_mc_addr(ha, ndev) { hash = 0; for (i = 0; i < ETH_ALEN; i++) - hash ^= mclist->dmi_addr[i]; + hash ^= ha->addr[i]; reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); val = READ_REG(priv, reg); val |= (1 << (hash % 32)); @@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget) priv->net_stats.rx_bytes += len; skb_put(skb, len); - skb->dev = priv->ndev; skb->ip_summed = CHECKSUM_UNNECESSARY; skb->protocol = eth_type_trans(skb, priv->ndev); @@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, int nr_frags = skb_shinfo(skb)->nr_frags; int i; - db->wptr->len = skb->len - skb->data_len; + db->wptr->len = skb_headlen(skb); db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data, db->wptr->len, PCI_DMA_TODEVICE); pbl->len = CPU_CHIP_SWAP32(db->wptr->len); @@ -1851,7 +1850,7 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) * @data - desc's data * @size - desc's size * - * NOTE: this func does check for available space and, if neccessary, waits for + * NOTE: this func does check for available space and, if necessary, waits for * NIC to read existing data before writing new one. */ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h index a19dcf8b6b5..cff98d07cba 100644 --- a/drivers/net/tehuti.h +++ b/drivers/net/tehuti.h @@ -32,6 +32,7 @@ #include <linux/firmware.h> #include <asm/byteorder.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> /* Compile Time Switches */ /* start */ diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 0fa7688ab48..0fea6854c4a 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -67,8 +67,8 @@ #include "tg3.h" #define DRV_MODULE_NAME "tg3" -#define DRV_MODULE_VERSION "3.108" -#define DRV_MODULE_RELDATE "February 17, 2010" +#define DRV_MODULE_VERSION "3.110" +#define DRV_MODULE_RELDATE "April 9, 2010" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -101,7 +101,7 @@ #define TG3_DEF_RX_RING_PENDING 200 #define TG3_RX_JUMBO_RING_SIZE 256 #define TG3_DEF_RX_JUMBO_RING_PENDING 100 -#define TG3_RSS_INDIR_TBL_SIZE 128 +#define TG3_RSS_INDIR_TBL_SIZE 128 /* Do not place this n-ring entries value into the tp struct itself, * we really want to expose these constants to GCC so that modulo et @@ -126,6 +126,9 @@ TG3_TX_RING_SIZE) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) +#define TG3_RX_DMA_ALIGN 16 +#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN) + #define TG3_DMA_BYTE_ENAB 64 #define TG3_RX_STD_DMA_SZ 1536 @@ -142,6 +145,26 @@ #define TG3_RX_JMB_BUFF_RING_SIZE \ (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) +#define TG3_RSS_MIN_NUM_MSIX_VECS 2 + +/* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. 
Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. + */ +#define TG3_RX_COPY_THRESHOLD 256 +#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD +#else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) +#endif + /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) @@ -152,6 +175,8 @@ #define TG3_NUM_TEST 6 +#define TG3_FW_UPDATE_TIMEOUT_SEC 5 + #define FIRMWARE_TG3 "tigon/tg3.bin" #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" @@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3); MODULE_FIRMWARE(FIRMWARE_TG3TSO); MODULE_FIRMWARE(FIRMWARE_TG3TSO5); -#define TG3_RSS_MIN_NUM_MSIX_VECS 2 - static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ module_param(tg3_debug, int, 0); MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); @@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val) static u32 tg3_read32(struct tg3 *tp, u32 off) { - return (readl(tp->regs + off)); + return readl(tp->regs + off); } static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) @@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) static u32 tg3_ape_read32(struct tg3 *tp, u32 off) { - return (readl(tp->aperegs + off)); + return readl(tp->aperegs + off); } static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) @@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) { - return (readl(tp->regs + off + GRCMBOX_BASE)); + return readl(tp->regs + off + GRCMBOX_BASE); } static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) @@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) writel(val, tp->regs + off + GRCMBOX_BASE); } -#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) +#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) -#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) -#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) -#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) +#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) +#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) +#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) -#define tw32(reg,val) tp->write32(tp, reg, val) -#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0) -#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us)) -#define tr32(reg) tp->read32(tp, reg) +#define tw32(reg, val) tp->write32(tp, reg, val) +#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) +#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) +#define tr32(reg) tp->read32(tp, reg) static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { @@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) return 0; switch (locknum) { - case TG3_APE_LOCK_GRC: - case TG3_APE_LOCK_MEM: - break; - default: - return -EINVAL; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; } off = 4 * locknum; @@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) return; switch (locknum) { - case TG3_APE_LOCK_GRC: - case TG3_APE_LOCK_MEM: - break; - default: - 
return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return; } off = 4 * locknum; @@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp) tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); @@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp) i = mdiobus_register(tp->mdio_bus); if (i) { - netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i); + dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); mdiobus_free(tp->mdio_bus); return i; } @@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; if (!phydev || !phydev->drv) { - netdev_warn(tp->dev, "No PHY devices\n"); + dev_warn(&tp->pdev->dev, "No PHY devices\n"); mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); return -ENODEV; @@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev) phydev->speed != tp->link_config.active_speed || phydev->duplex != tp->link_config.active_duplex || oldflowctrl != tp->link_config.active_flowctrl) - linkmesg = 1; + linkmesg = 1; tp->link_config.active_speed = phydev->speed; tp->link_config.active_duplex = phydev->duplex; @@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp) phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, phydev->dev_flags, phydev->interface); if (IS_ERR(phydev)) { - netdev_err(tp->dev, "Could not attach to PHY\n"); + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } @@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { /* Set Extended packet length bit for jumbo frames */ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); - } - else { + } else { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); } @@ -1974,8 +1997,7 @@ out: tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); - } - else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { + } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { @@ -2007,8 +2029,8 @@ out: u32 phy_reg; if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) - tg3_writephy(tp, MII_TG3_EXT_CTRL, - phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + tg3_writephy(tp, MII_TG3_EXT_CTRL, + phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { @@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->rxconfig = rx_cfg_reg; ret = ANEG_OK; - switch(ap->state) { + switch (ap->state) { case ANEG_STATE_UNKNOWN: if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; @@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, /* fallthru */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) { + if (delta > ANEG_STATE_SETTLE_TIME) ap->state = ANEG_STATE_ABILITY_DETECT_INIT; - } else { + else ret = ANEG_TIMER_ENAB; - } break; case ANEG_STATE_DISABLE_LINK_OK: @@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, break; case ANEG_STATE_ABILITY_DETECT: - if (ap->ability_match != 0 && ap->rxconfig != 0) { + if 
(ap->ability_match != 0 && ap->rxconfig != 0) ap->state = ANEG_STATE_ACK_DETECT_INIT; - } break; case ANEG_STATE_ACK_DETECT_INIT: @@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) current_duplex = DUPLEX_FULL; else current_duplex = DUPLEX_HALF; - } - else + } else { current_link_up = 0; + } } } @@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) tp->serdes_counter--; return; } + if (!netif_carrier_ok(tp->dev) && (tp->link_config.autoneg == AUTONEG_ENABLE)) { u32 bmcr; @@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; } } - } - else if (netif_carrier_ok(tp->dev) && - (tp->link_config.autoneg == AUTONEG_ENABLE) && - (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { u32 phy2; /* Select expansion interrupt status register */ @@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset) { int err; - if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) err = tg3_setup_fiber_phy(tp, force_reset); - } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) err = tg3_setup_fiber_mii_phy(tp, force_reset); - } else { + else err = tg3_setup_copper_phy(tp, force_reset); - } if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { u32 val, scale; @@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp) BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || tp->write32_tx_mbox == tg3_write_indirect_mbox); - netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n" - "Please report the problem to the driver maintainer and include system chipset information.\n"); + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. 
" + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); spin_lock(&tp->lock); tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; @@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi) } pci_unmap_single(tp->pdev, - pci_unmap_addr(ri, mapping), + dma_unmap_addr(ri, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); @@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi) tx_bug = 1; pci_unmap_page(tp->pdev, - pci_unmap_addr(ri, mapping), + dma_unmap_addr(ri, mapping), skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); sw_idx = NEXT_TX(sw_idx); @@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) if (!ri->skb) return; - pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), map_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(ri->skb); ri->skb = NULL; @@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, } map->skb = skb; - pci_unmap_addr_set(map, mapping, mapping); + dma_unmap_addr_set(map, mapping, mapping); desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); @@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; - int dest_idx; struct tg3_rx_prodring_set *spr = &tp->prodring[0]; + int dest_idx; switch (opaque_key) { case RXD_OPAQUE_RING_STD: @@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, } dest_map->skb = src_map->skb; - pci_unmap_addr_set(dest_map, mapping, - pci_unmap_addr(src_map, mapping)); + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); dest_desc->addr_hi = src_desc->addr_hi; dest_desc->addr_lo = src_desc->addr_lo; @@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) struct sk_buff *skb; dma_addr_t dma_addr; u32 opaque_key, desc_idx, *post_ptr; + bool hw_vlan __maybe_unused = false; + u16 vtag __maybe_unused = 0; desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { ri = &tp->prodring[0].rx_std_buffers[desc_idx]; - dma_addr = pci_unmap_addr(ri, mapping); + dma_addr = dma_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &std_prod_idx; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; - dma_addr = pci_unmap_addr(ri, mapping); + dma_addr = dma_unmap_addr(ri, mapping); skb = ri->skb; post_ptr = &jmb_prod_idx; } else @@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - ETH_FCS_LEN; - if (len > RX_COPY_THRESHOLD && - tp->rx_offset == NET_IP_ALIGN) { - /* rx_offset will likely not equal NET_IP_ALIGN - * if this is a 5701 card running in PCI-X mode - * [see tg3_get_invariants()] - */ + if (len > TG3_RX_COPY_THRESH(tp)) { int skb_size; skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, @@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); - copy_skb = netdev_alloc_skb(tp->dev, - len + TG3_RAW_IP_ALIGN); + copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + + TG3_RAW_IP_ALIGN); if (copy_skb == NULL) goto drop_it_no_recycle; - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); skb_put(copy_skb, 
len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); @@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) goto next_pkt; } + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { + vtag = desc->err_vlan & RXD_VLAN_MASK; #if TG3_VLAN_TAG_USED - if (tp->vlgrp != NULL && - desc->type_flags & RXD_FLAG_VLAN) { - vlan_gro_receive(&tnapi->napi, tp->vlgrp, - desc->err_vlan & RXD_VLAN_MASK, skb); - } else + if (tp->vlgrp) + hw_vlan = true; + else +#endif + { + struct vlan_ethhdr *ve = (struct vlan_ethhdr *) + __skb_push(skb, VLAN_HLEN); + + memmove(ve, skb->data + VLAN_HLEN, + ETH_ALEN * 2); + ve->h_vlan_proto = htons(ETH_P_8021Q); + ve->h_vlan_TCI = htons(vtag); + } + } + +#if TG3_VLAN_TAG_USED + if (hw_vlan) + vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); + else #endif napi_gro_receive(&tnapi->napi, skb); @@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) if (unlikely(work_done >= budget)) break; - /* tp->last_tag is used in tg3_restart_ints() below + /* tp->last_tag is used in tg3_int_reenable() below * to tell the hw how much work has been processed, * so we must read it before checking for more work. */ @@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) rmb(); /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer == tnapi->tx_cons && - *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { napi_complete(napi); /* Reenable interrupts. */ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); @@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) err = tg3_init_hw(tp, reset_phy); if (err) { - netdev_err(tp->dev, "Failed to re-initialize device, aborting\n"); + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_full_unlock(tp); del_timer_sync(&tp->timer); @@ -5279,7 +5316,7 @@ static void tg3_poll_controller(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); for (i = 0; i < tp->irq_cnt; i++) - tg3_interrupt(tp->napi[i].irq_vec, dev); + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); } #endif @@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, len = skb_shinfo(skb)->frags[i-1].size; pci_unmap_single(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], + dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), len, PCI_DMA_TODEVICE); if (i == 0) { tnapi->tx_buffers[entry].skb = new_skb; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, new_addr); } else { tnapi->tx_buffers[entry].skb = NULL; @@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct netdev_queue *txq; unsigned int i, last; - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); tnapi = &tp->napi[skb_get_queue_mapping(skb)]; if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) @@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, netif_tx_stop_queue(txq); /* This is a hard error, log it. */ - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); } return NETDEV_TX_BUSY; } @@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, tcp_hdr(skb)->check = 0; - } - else if (skb->ip_summed == CHECKSUM_PARTIAL) + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { base_flags |= TXD_FLAG_TCPUDP_CSUM; + } + #if TG3_VLAN_TAG_USED if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) base_flags |= (TXD_FLAG_VLAN | @@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, } tnapi->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && !mss && skb->len > ETH_DATA_LEN) @@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, goto dma_error; tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); tg3_set_txd(tnapi, entry, mapping, len, @@ -5627,7 +5665,7 @@ dma_error: entry = tnapi->tx_prod; tnapi->tx_buffers[entry].skb = NULL; pci_unmap_single(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), + dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), skb_headlen(skb), PCI_DMA_TODEVICE); for (i = 0; i <= last; i++) { @@ -5635,7 +5673,7 @@ dma_error: entry = NEXT_TX(entry); pci_unmap_page(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], + dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), frag->size, PCI_DMA_TODEVICE); } @@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, struct netdev_queue *txq; unsigned int i, last; - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); tnapi = &tp->napi[skb_get_queue_mapping(skb)]; if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) @@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, netif_tx_stop_queue(txq); /* This is a hard error, log it. */ - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); } return NETDEV_TX_BUSY; } @@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, hdr_len = ip_tcp_len + tcp_opt_len; if (unlikely((ETH_HLEN + hdr_len) > 80) && (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) - return (tg3_tso_bug(tp, skb)); + return tg3_tso_bug(tp, skb); base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); @@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, } tnapi->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); would_hit_hwbug = 0; @@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, len, PCI_DMA_TODEVICE); tnapi->tx_buffers[entry].skb = NULL; - pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); if (pci_dma_mapping_error(tp->pdev, mapping)) goto dma_error; @@ -5898,7 +5936,7 @@ dma_error: entry = tnapi->tx_prod; tnapi->tx_buffers[entry].skb = NULL; pci_unmap_single(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), + dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), skb_headlen(skb), PCI_DMA_TODEVICE); for (i = 0; i <= last; i++) { @@ -5906,7 +5944,7 @@ dma_error: entry = NEXT_TX(entry); pci_unmap_page(tp->pdev, - pci_unmap_addr(&tnapi->tx_buffers[entry], + dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), frag->size, PCI_DMA_TODEVICE); } @@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; ethtool_op_set_tso(dev, 0); - } - else + } else { tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; + } } else { if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; @@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp, } } -/* Initialize tx/rx rings for packet processing. +/* Initialize rx rings for packet processing. * * The chip has been shut down and the driver detached from * the networking, so no interrupts or new tx packets will @@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, /* Now allocate fresh SKBs for each rx ring. */ for (i = 0; i < tp->rx_pending; i++) { if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { - netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n", - i, tp->rx_pending); + netdev_warn(tp->dev, + "Using a smaller RX standard ring. Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); if (i == 0) goto initfail; tp->rx_pending = i; @@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, for (i = 0; i < tp->rx_jumbo_pending; i++) { if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { - netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n", - i, tp->rx_jumbo_pending); + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. 
Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); if (i == 0) goto initfail; tp->rx_jumbo_pending = i; @@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp) } pci_unmap_single(tp->pdev, - pci_unmap_addr(txp, mapping), + dma_unmap_addr(txp, mapping), skb_headlen(skb), PCI_DMA_TODEVICE); txp->skb = NULL; @@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp) for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; pci_unmap_page(tp->pdev, - pci_unmap_addr(txp, mapping), + dma_unmap_addr(txp, mapping), skb_shinfo(skb)->frags[k].size, PCI_DMA_TODEVICE); i++; @@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int } if (i == MAX_WAIT_CNT && !silent) { - pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", - ofs, enable_bit); + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); return -ENODEV; } @@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) break; } if (i >= MAX_WAIT_CNT) { - netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", - __func__, tr32(MAC_TX_MODE)); + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); err |= -ENODEV; } @@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) return; switch (kind) { - case RESET_KIND_INIT: - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, - APE_HOST_SEG_SIG_MAGIC); - tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, - APE_HOST_SEG_LEN_MAGIC); - apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); - tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); - tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, - APE_HOST_DRIVER_ID_MAGIC); - tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, - APE_HOST_BEHAV_NO_PHYLOCK); - - event = APE_EVENT_STATUS_STATE_START; - break; - case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. 
+ */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - event = APE_EVENT_STATUS_STATE_UNLOAD; - break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; - default: - return; + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; } event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; @@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b if (cpu_base == TX_CPU_BASE && (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { - netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n", + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", __func__); return -EINVAL; } @@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) udelay(1000); } if (i >= 5) { - netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n", + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, tr32(RX_CPU_BASE + CPU_PC), info.fw_base); return -ENODEV; } @@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp) udelay(1000); } if (i >= 5) { - netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", __func__, tr32(cpu_base + CPU_PC), info.fw_base); return -ENODEV; } @@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); - if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { + if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) tg3_abort_hw(tp, 1); - } if (reset_phy) tg3_phy_reset(tp); @@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(GRC_MODE, grc_mode); } + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + } + /* This works around an issue with Athlon chipsets on * B3 tigon3 silicon. This bit has no effect on any * other revision. 
But do not set this on PCI Express @@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { val = tr32(TG3PCI_DMA_RW_CTRL) & ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) + val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { @@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); - } - else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { + } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { int fw_len; fw_len = tp->fw_len; @@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | - (RX_STD_MAX_SIZE << 2); + (TG3_RX_STD_DMA_SZ << 2); else - val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; } else val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; @@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque) tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3); tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); - /* 5 seconds timeout */ - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); tg3_generate_fw_event(tp); } @@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp) return err; /* MSI test failed, go back to INTx mode */ - netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n" - "Please report this failure to the PCI maintainer and include system chipset information\n"); + netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " + "to INTx mode. Please report this failure to the PCI " + "maintainer and include system chipset information\n"); free_irq(tp->napi[0].irq_vec, &tp->napi[0]); @@ -8738,7 +8805,8 @@ static void tg3_ints_init(struct tg3 *tp) /* All MSI supporting chips should support tagged * status. Assert that this is the case. */ - netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n"); + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? 
Not using MSI\n"); goto defcfg; } @@ -8913,236 +8981,6 @@ err_out1: return err; } -#if 0 -/*static*/ void tg3_dump_state(struct tg3 *tp) -{ - u32 val32, val32_2, val32_3, val32_4, val32_5; - u16 val16; - int i; - struct tg3_hw_status *sblk = tp->napi[0]->hw_status; - - pci_read_config_word(tp->pdev, PCI_STATUS, &val16); - pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); - printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n", - val16, val32); - - /* MAC block */ - printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n", - tr32(MAC_MODE), tr32(MAC_STATUS)); - printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n", - tr32(MAC_EVENT), tr32(MAC_LED_CTRL)); - printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n", - tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS)); - printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n", - tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS)); - - /* Send data initiator control block */ - printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n", - tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS)); - printk(" SNDDATAI_STATSCTRL[%08x]\n", - tr32(SNDDATAI_STATSCTRL)); - - /* Send data completion control block */ - printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE)); - - /* Send BD ring selector block */ - printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n", - tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS)); - - /* Send BD initiator control block */ - printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n", - tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS)); - - /* Send BD completion control block */ - printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE)); - - /* Receive list placement control block */ - printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n", - tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS)); - printk(" RCVLPC_STATSCTRL[%08x]\n", - tr32(RCVLPC_STATSCTRL)); - - /* Receive data and receive BD initiator control block */ - printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n", - tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS)); - - /* Receive data completion control block */ - printk("DEBUG: RCVDCC_MODE[%08x]\n", - tr32(RCVDCC_MODE)); - - /* Receive BD initiator control block */ - printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n", - tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS)); - - /* Receive BD completion control block */ - printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n", - tr32(RCVCC_MODE), tr32(RCVCC_STATUS)); - - /* Receive list selector control block */ - printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n", - tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS)); - - /* Mbuf cluster free block */ - printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n", - tr32(MBFREE_MODE), tr32(MBFREE_STATUS)); - - /* Host coalescing control block */ - printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n", - tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS)); - printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n", - tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), - tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); - printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n", - tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), - tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); - printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n", - tr32(HOSTCC_STATS_BLK_NIC_ADDR)); - printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n", - tr32(HOSTCC_STATUS_BLK_NIC_ADDR)); - - /* Memory arbiter control block */ - printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n", - tr32(MEMARB_MODE), tr32(MEMARB_STATUS)); - - /* Buffer manager control block */ - printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n", - 
tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS)); - printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n", - tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE)); - printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] " - "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n", - tr32(BUFMGR_DMA_DESC_POOL_ADDR), - tr32(BUFMGR_DMA_DESC_POOL_SIZE)); - - /* Read DMA control block */ - printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n", - tr32(RDMAC_MODE), tr32(RDMAC_STATUS)); - - /* Write DMA control block */ - printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n", - tr32(WDMAC_MODE), tr32(WDMAC_STATUS)); - - /* DMA completion block */ - printk("DEBUG: DMAC_MODE[%08x]\n", - tr32(DMAC_MODE)); - - /* GRC block */ - printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n", - tr32(GRC_MODE), tr32(GRC_MISC_CFG)); - printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n", - tr32(GRC_LOCAL_CTRL)); - - /* TG3_BDINFOs */ - printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n", - tr32(RCVDBDI_JUMBO_BD + 0x0), - tr32(RCVDBDI_JUMBO_BD + 0x4), - tr32(RCVDBDI_JUMBO_BD + 0x8), - tr32(RCVDBDI_JUMBO_BD + 0xc)); - printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n", - tr32(RCVDBDI_STD_BD + 0x0), - tr32(RCVDBDI_STD_BD + 0x4), - tr32(RCVDBDI_STD_BD + 0x8), - tr32(RCVDBDI_STD_BD + 0xc)); - printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n", - tr32(RCVDBDI_MINI_BD + 0x0), - tr32(RCVDBDI_MINI_BD + 0x4), - tr32(RCVDBDI_MINI_BD + 0x8), - tr32(RCVDBDI_MINI_BD + 0xc)); - - tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32); - tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2); - tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3); - tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4); - printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n", - val32, val32_2, val32_3, val32_4); - - tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32); - tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2); - tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3); - tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4); - printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n", - val32, val32_2, val32_3, val32_4); - - tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32); - tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2); - tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3); - tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4); - tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5); - printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n", - val32, val32_2, val32_3, val32_4, val32_5); - - /* SW status block */ - printk(KERN_DEBUG - "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", - sblk->status, - sblk->status_tag, - sblk->rx_jumbo_consumer, - sblk->rx_consumer, - sblk->rx_mini_consumer, - sblk->idx[0].rx_producer, - sblk->idx[0].tx_consumer); - - /* SW statistics block */ - printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", - ((u32 *)tp->hw_stats)[0], - ((u32 *)tp->hw_stats)[1], - ((u32 *)tp->hw_stats)[2], - ((u32 *)tp->hw_stats)[3]); - - /* Mailboxes */ - printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", - tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), - tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), - tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), - tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); - - /* NIC side send descriptors. 
*/ - for (i = 0; i < 6; i++) { - unsigned long txd; - - txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC - + (i * sizeof(struct tg3_tx_buffer_desc)); - printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n", - i, - readl(txd + 0x0), readl(txd + 0x4), - readl(txd + 0x8), readl(txd + 0xc)); - } - - /* NIC side RX descriptors. */ - for (i = 0; i < 6; i++) { - unsigned long rxd; - - rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC - + (i * sizeof(struct tg3_rx_buffer_desc)); - printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n", - i, - readl(rxd + 0x0), readl(rxd + 0x4), - readl(rxd + 0x8), readl(rxd + 0xc)); - rxd += (4 * sizeof(u32)); - printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n", - i, - readl(rxd + 0x0), readl(rxd + 0x4), - readl(rxd + 0x8), readl(rxd + 0xc)); - } - - for (i = 0; i < 6; i++) { - unsigned long rxd; - - rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC - + (i * sizeof(struct tg3_rx_buffer_desc)); - printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n", - i, - readl(rxd + 0x0), readl(rxd + 0x4), - readl(rxd + 0x8), readl(rxd + 0xc)); - rxd += (4 * sizeof(u32)); - printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n", - i, - readl(rxd + 0x0), readl(rxd + 0x4), - readl(rxd + 0x8), readl(rxd + 0xc)); - } -} -#endif - static struct net_device_stats *tg3_get_stats(struct net_device *); static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); @@ -9161,9 +8999,6 @@ static int tg3_close(struct net_device *dev) tg3_phy_stop(tp); tg3_full_lock(tp, 1); -#if 0 - tg3_dump_state(tp); -#endif tg3_disable_ints(tp); @@ -9405,9 +9240,8 @@ static inline u32 calc_crc(unsigned char *buf, int len) reg >>= 1; - if (tmp) { + if (tmp) reg ^= 0xedb88320; - } } } @@ -9451,20 +9285,20 @@ static void __tg3_set_rx_mode(struct net_device *dev) rx_mode |= RX_MODE_PROMISC; } else if (dev->flags & IFF_ALLMULTI) { /* Accept all multicast. */ - tg3_set_multi (tp, 1); + tg3_set_multi(tp, 1); } else if (netdev_mc_empty(dev)) { /* Reject all multicast. */ - tg3_set_multi (tp, 0); + tg3_set_multi(tp, 0); } else { /* Accept one or more multicast(s). 
*/ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[4] = { 0, }; u32 regidx; u32 bit; u32 crc; - netdev_for_each_mc_addr(mclist, dev) { - crc = calc_crc (mclist->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); bit = ~crc & 0x7f; regidx = (bit & 0x60) >> 5; bit &= 0x1f; @@ -9617,7 +9451,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, memcpy(data, ((char*)&val) + b_offset, b_count); len -= b_count; offset += b_count; - eeprom->len += b_count; + eeprom->len += b_count; } /* read bytes upto the last 4 byte boundary */ @@ -9776,7 +9610,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) + if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) mask |= ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; @@ -10165,8 +9999,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data) if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { if (data != 0) return -EINVAL; - return 0; - } + return 0; + } spin_lock_bh(&tp->lock); if (data) @@ -10185,8 +10019,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { if (data != 0) return -EINVAL; - return 0; - } + return 0; + } if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ethtool_op_set_tx_ipv6_csum(dev, data); @@ -10196,7 +10030,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } -static int tg3_get_sset_count (struct net_device *dev, int sset) +static int tg3_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_TEST: @@ -10208,7 +10042,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset) } } -static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) +static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_STATS: @@ -10255,7 +10089,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data) return 0; } -static void tg3_get_ethtool_stats (struct net_device *dev, +static void tg3_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *tmp_stats) { struct tg3 *tp = netdev_priv(dev); @@ -10361,8 +10195,7 @@ static int tg3_test_nvram(struct tg3 *tp) for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) parity[k++] = buf8[i] & msk; i++; - } - else if (i == 16) { + } else if (i == 16) { int l; u8 msk; @@ -10460,7 +10293,7 @@ static int tg3_test_registers(struct tg3 *tp) { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff }, { MAC_ADDR_0_LOW, 0x0000, - 0x00000000, 0xffffffff }, + 0x00000000, 0xffffffff }, { MAC_RX_MTU_SIZE, 0x0000, 0x00000000, 0x0000ffff }, { MAC_TX_MODE, 0x0000, @@ -10648,7 +10481,8 @@ static int tg3_test_registers(struct tg3 *tp) out: if (netif_msg_hw(tp)) - pr_err("Register test failed at offset %x\n", offset); + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); tw32(offset, save_val); return -EIO; } @@ -10824,9 +10658,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) MII_TG3_EXT_CTRL_LNK3_LED_MODE); } tw32(MAC_MODE, mac_mode); - } - else + } else { return -EINVAL; + } err = -EIO; @@ -10908,7 +10742,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) rx_skb = tpr->rx_std_buffers[desc_idx].skb; - map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); pci_dma_sync_single_for_cpu(tp->pdev, map, 
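The __tg3_set_rx_mode() hunk above, like the later tlan, 3c359, ibmtr and lanstreamer hunks, replaces the old struct dev_mc_list walk with netdev_for_each_mc_addr() over struct netdev_hw_addr. A minimal sketch of that pattern follows; the hash (ether_crc_le() here rather than the driver's own CRC) and the filter layout are placeholders for illustration only.

/*
 * Sketch of the netdev_hw_addr based multicast walk that replaces the
 * dev_mc_list/dmi_addr iteration.  The 128-bit hash filter below is a
 * generic example, not the tg3 register layout.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void ex_build_mc_filter(struct net_device *dev, u32 mc_filter[4])
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 4 * sizeof(u32));

	/* ha->addr takes over from the old dmi->dmi_addr field. */
	netdev_for_each_mc_addr(ha, dev) {
		u32 bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x7f;

		mc_filter[bit >> 5] |= 1u << (bit & 0x1f);
	}
}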
rx_len, PCI_DMA_FROMDEVICE); for (i = 14; i < tx_len; i++) { @@ -11082,7 +10916,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(phydev, data, cmd); } - switch(cmd) { + switch (cmd) { case SIOCGMIIPHY: data->phy_id = tp->phy_addr; @@ -11775,7 +11609,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) tp->tg3_flags |= TG3_FLAG_NVRAM; if (tg3_nvram_lock(tp)) { - netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n", + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", __func__); return; } @@ -11894,7 +11729,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, if (ret) break; - page_off = offset & pagemask; + page_off = offset & pagemask; size = pagesize; if (len < size) size = len; @@ -11922,7 +11757,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) break; /* Issue another write enable to start the write. */ @@ -11976,7 +11811,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, memcpy(&data, buf + i, 4); tw32(NVRAM_WRDATA, be32_to_cpu(data)); - page_off = offset % tp->nvram_pagesize; + page_off = offset % tp->nvram_pagesize; phy_addr = tg3_nvram_phys_addr(tp, offset); @@ -11984,7 +11819,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; - if ((page_off == 0) || (i == 0)) + if (page_off == 0 || i == 0) nvram_cmd |= NVRAM_CMD_FIRST; if (page_off == (tp->nvram_pagesize - 4)) nvram_cmd |= NVRAM_CMD_LAST; @@ -12027,8 +11862,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); - } - else { + } else { u32 grc_mode; ret = tg3_nvram_lock(tp); @@ -12048,8 +11882,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) ret = tg3_nvram_write_block_buffered(tp, offset, len, buf); - } - else { + } else { ret = tg3_nvram_write_block_unbuffered(tp, offset, len, buf); } @@ -12544,11 +12377,11 @@ skip_phy_reset: return err; } -static void __devinit tg3_read_partno(struct tg3 *tp) +static void __devinit tg3_read_vpd(struct tg3 *tp) { - unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ + u8 vpd_data[TG3_NVM_VPD_LEN]; unsigned int block_end, rosize, len; - int i = 0; + int j, i = 0; u32 magic; if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || @@ -12597,6 +12430,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp) if (block_end > TG3_NVM_VPD_LEN) goto out_not_found; + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1); + } + +partno: i = pci_vpd_find_info_keyword(vpd_data, i, rosize, PCI_VPD_RO_KEYWORD_PARTNO); if (i < 0) @@ -12666,7 
+12525,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) static void __devinit tg3_read_bc_ver(struct tg3 *tp) { u32 val, offset, start, ver_offset; - int i; + int i, dst_off; bool newver = false; if (tg3_nvram_read(tp, 0xc, &offset) || @@ -12686,8 +12545,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp) newver = true; } + dst_off = strlen(tp->fw_ver); + if (newver) { - if (tg3_nvram_read(tp, offset + 8, &ver_offset)) + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) return; offset = offset + ver_offset - start; @@ -12696,7 +12558,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp) if (tg3_nvram_read_be32(tp, offset + i, &v)) return; - memcpy(tp->fw_ver + i, &v, sizeof(v)); + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); } } else { u32 major, minor; @@ -12707,7 +12569,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp) major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> TG3_NVM_BCVER_MAJSFT; minor = ver_offset & TG3_NVM_BCVER_MINMSK; - snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor); + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); } } @@ -12731,9 +12594,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) { u32 offset, major, minor, build; - tp->fw_ver[0] = 's'; - tp->fw_ver[1] = 'b'; - tp->fw_ver[2] = '\0'; + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) return; @@ -12770,11 +12631,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) if (minor > 99 || build > 26) return; - snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor); + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); if (build > 0) { - tp->fw_ver[8] = 'a' + build - 1; - tp->fw_ver[9] = '\0'; + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; } } @@ -12861,12 +12725,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp) static void __devinit tg3_read_fw_ver(struct tg3 *tp) { u32 val; + bool vpd_vers = false; - if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { - tp->fw_ver[0] = 's'; - tp->fw_ver[1] = 'b'; - tp->fw_ver[2] = '\0'; + if (tp->fw_ver[0] != 0) + vpd_vers = true; + if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { + strcat(tp->fw_ver, "sb"); return; } @@ -12883,11 +12748,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp) return; if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || - (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) - return; + (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers) + goto done; tg3_read_mgmtfw_ver(tp); +done: tp->fw_ver[TG3_VER_SIZE - 1] = 0; } @@ -12897,9 +12763,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) { static struct pci_device_id write_reorder_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + PCI_DEVICE_ID_AMD_FE_GATE_700C) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + PCI_DEVICE_ID_AMD_8131_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, { }, @@ -13065,8 +12931,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); - } - else { + } else { struct pci_dev *bridge = NULL; do { @@ -13188,8 +13053,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; if 
(!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || - (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || - (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, @@ -13223,7 +13088,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); if (!tp->pcix_cap) { - pr_err("Cannot find PCI-X capability, aborting\n"); + dev_err(&tp->pdev->dev, + "Cannot find PCI-X capability, aborting\n"); return -EIO; } @@ -13420,7 +13286,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) /* Force the chip into D0. */ err = tg3_set_power_state(tp, PCI_D0); if (err) { - pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev)); + dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); return err; } @@ -13594,13 +13460,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) err = tg3_phy_probe(tp); if (err) { - pr_err("(%s) phy probe failed, err %d\n", - pci_name(tp->pdev), err); + dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); /* ... but do not return immediately ... */ tg3_mdio_fini(tp); } - tg3_read_partno(tp); + tg3_read_vpd(tp); tg3_read_fw_ver(tp); if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { @@ -13638,10 +13503,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; - tp->rx_offset = NET_IP_ALIGN; + tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; + tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) - tp->rx_offset = 0; + (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { + tp->rx_offset -= NET_IP_ALIGN; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + tp->rx_copy_thresh = ~(u16)0; +#endif + } tp->rx_std_max_post = TG3_RX_RING_SIZE; @@ -13964,11 +13834,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm } pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); - if (to_device) { + if (to_device) tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); - } else { + else tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); - } ret = -ENODEV; for (i = 0; i < 40; i++) { @@ -14104,8 +13973,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp) /* Send the buffer to the chip. */ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); if (ret) { - pr_err("tg3_test_dma() Write the buffer failed %d\n", - ret); + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); break; } @@ -14115,8 +13985,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp) u32 val; tg3_read_mem(tp, 0x2100 + (i*4), &val); if (le32_to_cpu(val) != p[i]) { - pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", - val, i); + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on device! " + "(%d != %d)\n", __func__, val, i); /* ret = -ENODEV here? */ } p[i] = 0; @@ -14125,9 +13996,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp) /* Now read it back. */ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); if (ret) { - pr_err("tg3_test_dma() Read the buffer failed %d\n", - ret); - + dev_err(&tp->pdev->dev, "%s: Buffer read failed. 
" + "err = %d\n", __func__, ret); break; } @@ -14143,8 +14013,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); break; } else { - pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", - p[i], i); + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); ret = -ENODEV; goto out; } @@ -14171,10 +14042,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp) if (pci_dev_present(dma_wait_state_chipsets)) { tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; - } - else + } else { /* Safe to use the calculated DMA boundary. */ tp->dma_rwctrl = saved_dma_rwctrl; + } tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); } @@ -14436,13 +14307,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = pci_enable_device(pdev); if (err) { - pr_err("Cannot enable PCI device, aborting\n"); + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { - pr_err("Cannot obtain PCI resources, aborting\n"); + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } @@ -14451,14 +14322,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, /* Find power-management capability. */ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pm_cap == 0) { - pr_err("Cannot find PowerManagement capability, aborting\n"); + dev_err(&pdev->dev, + "Cannot find Power Management capability, aborting\n"); err = -EIO; goto err_out_free_res; } dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { - pr_err("Etherdev alloc failed, aborting\n"); + dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); err = -ENOMEM; goto err_out_free_res; } @@ -14508,7 +14380,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->regs = pci_ioremap_bar(pdev, BAR_0); if (!tp->regs) { - netdev_err(dev, "Cannot map device registers, aborting\n"); + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_free_dev; } @@ -14524,7 +14396,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = tg3_get_invariants(tp); if (err) { - netdev_err(dev, "Problem fetching invariants of chip, aborting\n"); + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } @@ -14559,7 +14432,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = pci_set_consistent_dma_mask(pdev, persist_dma_mask); if (err < 0) { - netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); + dev_err(&pdev->dev, "Unable to obtain 64 bit " + "DMA for consistent allocations\n"); goto err_out_iounmap; } } @@ -14567,7 +14441,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (err || dma_mask == DMA_BIT_MASK(32)) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - netdev_err(dev, "No usable DMA configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_out_iounmap; } } @@ -14616,14 +14491,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = tg3_get_device_address(tp); if (err) { - netdev_err(dev, "Could not obtain valid ethernet address, aborting\n"); + dev_err(&pdev->dev, + "Could not obtain valid ethernet address, aborting\n"); goto err_out_iounmap; } if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { - netdev_err(dev, "Cannot map APE registers, 
aborting\n"); + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } @@ -14647,7 +14524,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = tg3_test_dma(tp); if (err) { - netdev_err(dev, "DMA engine test failed, aborting\n"); + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); goto err_out_apeunmap; } @@ -14708,7 +14585,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, err = register_netdev(dev); if (err) { - netdev_err(dev, "Cannot register net device, aborting\n"); + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_apeunmap; } @@ -14721,11 +14598,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { struct phy_device *phydev; phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); } else - netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", - tg3_phy_string(tp), + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d])\n", tg3_phy_string(tp), ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : "10/100/1000Base-T")), diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 574a1cc4d35..ce9c4918c31 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -23,11 +23,8 @@ #define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ #define TG3_BDINFO_SIZE 0x10UL -#define RX_COPY_THRESHOLD 256 - #define TG3_RX_INTERNAL_RING_SZ_5906 32 -#define RX_STD_MAX_SIZE 1536 #define RX_STD_MAX_SIZE_5705 512 #define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ @@ -183,6 +180,7 @@ #define METAL_REV_B2 0x02 #define TG3PCI_DMA_RW_CTRL 0x0000006c #define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 #define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 #define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 #define DMA_RWCTRL_READ_BNDRY_16 0x00000100 @@ -252,7 +250,7 @@ /* 0x94 --> 0x98 unused */ #define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ #define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ -/* 0xa0 --> 0xb8 unused */ +/* 0xa8 --> 0xb8 unused */ #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 @@ -1854,6 +1852,8 @@ #define TG3_PCIE_TLDLPL_PORT 0x00007c00 #define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 #define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 +#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 +#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 /* OTP bit definitions */ #define TG3_OTP_AGCTGT_MASK 0x000000e0 @@ -2082,7 +2082,7 @@ #define MII_TG3_DSP_AADJ1CH0 0x001f #define MII_TG3_DSP_AADJ1CH3 0x601f #define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 -#define MII_TG3_DSP_EXP8 0x0708 +#define MII_TG3_DSP_EXP8 0x0f08 #define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 #define MII_TG3_DSP_EXP8_AEDW 0x0200 #define MII_TG3_DSP_EXP75 0x0f75 @@ -2512,7 +2512,7 @@ struct tg3_hw_stats { */ struct ring_info { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; struct tg3_config_info { @@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config { struct tg3_ethtool_stats { /* Statistics maintained by Receive MAC. 
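The ring_info change just above (DECLARE_PCI_UNMAP_ADDR to DEFINE_DMA_UNMAP_ADDR), together with the pci_unmap_addr() to dma_unmap_addr() conversions earlier in tg3.c, switches to the generic DMA-mapping state helpers. Below is a minimal sketch of declaring, storing and reading back the unmap address; the structure and function names are invented and error handling is omitted.

/*
 * Sketch of the dma_unmap_addr() helpers replacing the PCI-specific
 * pci_unmap_addr() macros.  struct ex_ring_info, ex_map() and
 * ex_unmap() are illustrative only.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct ex_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* may compile to nothing on some configs */
};

static void ex_map(struct pci_dev *pdev, struct ex_ring_info *ri,
		   struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, skb->data, skb_headlen(skb),
				 PCI_DMA_TODEVICE);
	ri->skb = skb;
	dma_unmap_addr_set(ri, mapping, mapping);	/* remember it for the unmap */
}

static void ex_unmap(struct pci_dev *pdev, struct ex_ring_info *ri)
{
	pci_unmap_single(pdev, dma_unmap_addr(ri, mapping),
			 skb_headlen(ri->skb), PCI_DMA_TODEVICE);
	ri->skb = NULL;
}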
*/ - u64 rx_octets; + u64 rx_octets; u64 rx_fragments; u64 rx_ucast_packets; u64 rx_mcast_packets; @@ -2751,9 +2751,11 @@ struct tg3 { struct tg3_napi napi[TG3_IRQ_MAX_VECS]; void (*write32_rx_mbox) (struct tg3 *, u32, u32); + u32 rx_copy_thresh; u32 rx_pending; u32 rx_jumbo_pending; u32 rx_std_max_post; + u32 rx_offset; u32 rx_pkt_map_sz; #if TG3_VLAN_TAG_USED struct vlan_group *vlgrp; @@ -2773,7 +2775,6 @@ struct tg3 { unsigned long last_event_jiffies; }; - u32 rx_offset; u32 tg3_flags; #define TG3_FLAG_TAGGED_STATUS 0x00000001 #define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 390540c101c..8ffec22b74b 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev ) static void TLan_SetMulticastList( struct net_device *dev ) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 hash1 = 0; u32 hash2 = 0; int i; @@ -1336,12 +1336,12 @@ static void TLan_SetMulticastList( struct net_device *dev ) TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); } else { i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if ( i < 3 ) { TLan_SetMac( dev, i + 1, - (char *) &dmi->dmi_addr ); + (char *) &ha->addr); } else { - offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); + offset = TLan_HashFunc((u8 *)&ha->addr); if ( offset < 32 ) hash1 |= ( 1 << offset ); else @@ -2464,7 +2464,7 @@ static void TLan_PhyPrint( struct net_device *dev ) printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); } else if ( phy <= TLAN_PHY_MAX_ADDR ) { printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); - printk( "TLAN: Off. +0 +1 +2 +3 \n" ); + printk( "TLAN: Off. +0 +1 +2 +3\n" ); for ( i = 0; i < 0x20; i+= 4 ) { printk( "TLAN: 0x%02x", i ); TLan_MiiReadReg( dev, phy, i, &data0 ); diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 0fb930feea4..10800f16a23 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -63,6 +63,7 @@ #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/firmware.h> +#include <linux/slab.h> #include <net/checksum.h> @@ -76,7 +77,7 @@ static char version[] __devinitdata = #define FW_NAME "3com/3C359.bin" MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; -MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; +MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ; MODULE_FIRMWARE(FW_NAME); /* Module parameters */ @@ -162,19 +163,19 @@ static void print_tx_state(struct net_device *dev) u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; - printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head, + printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head, xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; - printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n"); + printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n"); for (i = 0; i < 16; i++) { txd = &(xl_priv->xl_tx_ring[i]) ; - printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd), + printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd), txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; } - printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) ); + printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) ); - printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); - printk("Queue status = %0x \n",netif_running(dev) 
) ; + printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) ); + printk("Queue status = %0x\n",netif_running(dev) ) ; } static void print_rx_state(struct net_device *dev) @@ -185,19 +186,19 @@ static void print_rx_state(struct net_device *dev) u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; - printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ; - printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n"); + printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail); + printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n"); for (i = 0; i < 16; i++) { /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */ rxd = &(xl_priv->xl_rx_ring[i]) ; - printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd), + printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd), rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; } - printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) ); + printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR)); - printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); - printk("Queue status = %0x \n",netif_running(dev) ) ; + printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL)); + printk("Queue status = %0x\n",netif_running(dev)); } #endif @@ -390,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev) struct xl_private *xl_priv = netdev_priv(dev); int err; - printk(KERN_INFO "%s \n", version); + printk(KERN_INFO "%s\n", version); printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq); @@ -462,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev) writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); #if XL_DEBUG - printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ; + printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA)); #endif if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) { @@ -590,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev) #if XL_DEBUG writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; if ( readw(xl_mmio + MMIO_MACDATA) & 2) { - printk(KERN_INFO "Default ring speed 4 mbps \n") ; + printk(KERN_INFO "Default ring speed 4 mbps\n"); } else { - printk(KERN_INFO "Default ring speed 16 mbps \n") ; + printk(KERN_INFO "Default ring speed 16 mbps\n"); } printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb); #endif @@ -650,7 +651,7 @@ static int xl_open(struct net_device *dev) if (open_err != 0) { /* Something went wrong with the open command */ if (open_err & 0x07) { /* Wrong speed, retry at different speed */ - printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ; + printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name); switchsettings = switchsettings ^ 2 ; xl_ee_write(dev,0x08,switchsettings) ; xl_hw_reset(dev) ; @@ -702,7 +703,7 @@ static int xl_open(struct net_device *dev) } if (i==0) { - printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ; + printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. 
Adapter disabled\n",dev->name); free_irq(dev->irq,dev) ; kfree(xl_priv->xl_tx_ring); kfree(xl_priv->xl_rx_ring); @@ -852,7 +853,7 @@ static int xl_open_hw(struct net_device *dev) writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; - printk(", ARB: %04x \n",xl_priv->arb ) ; + printk(", ARB: %04x\n",xl_priv->arb ); writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ; @@ -866,7 +867,7 @@ static int xl_open_hw(struct net_device *dev) ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; } ver_str[i] = '\0' ; - printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str); + printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str); } /* @@ -990,7 +991,7 @@ static void xl_rx(struct net_device *dev) skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; if (skb==NULL) { /* Still need to fix the rx ring */ - printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ; + printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name); adv_rx_ring(dev) ; dev->stats.rx_dropped++ ; writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; @@ -1091,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) */ if (intstatus == 0x0001) { writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; - printk(KERN_INFO "%s: 00001 int received \n",dev->name) ; + printk(KERN_INFO "%s: 00001 int received\n",dev->name); } else { if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { @@ -1102,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) */ if (intstatus & HOSTERRINT) { - printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ; + printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus); writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; - printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); + printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); netif_stop_queue(dev) ; xl_freemem(dev) ; free_irq(dev->irq,dev); @@ -1127,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) Must put a timeout check here ! 
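The xl_interrupt() TX-underrun path above still spins in an empty loop, and its own comment asks for a timeout check. A bounded poll could look like the sketch below; ex_wait_clear(), the register offset, the mask and the roughly 10 ms budget are all invented for illustration.

/*
 * Sketch of a bounded MMIO poll answering the "must put a timeout
 * check here" comment.  All names and the 10 ms budget are examples.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int ex_wait_clear(void __iomem *mmio, unsigned int reg, u16 mask)
{
	int i;

	/* Poll for at most ~10 ms (1000 * 10 us) instead of spinning forever. */
	for (i = 0; i < 1000; i++) {
		if (!(readw(mmio + reg) & mask))
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}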
*/ /* Empty Loop */ } - printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ; + printk(KERN_WARNING "%s: TX Underrun received\n",dev->name); writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; } /* TxUnderRun */ @@ -1156,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) macstatus = readw(xl_mmio + MMIO_MACDATA) ; printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name); if (macstatus & (1<<14)) - printk(KERN_WARNING "tchk error: Unrecoverable error \n") ; + printk(KERN_WARNING "tchk error: Unrecoverable error\n"); if (macstatus & (1<<3)) - printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ; + printk(KERN_WARNING "eint error: Internal watchdog timer expired\n"); if (macstatus & (1<<2)) - printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ; + printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n"); printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; - printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); + printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name); netif_stop_queue(dev) ; xl_freemem(dev) ; free_irq(dev->irq,dev); @@ -1174,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } } else { - printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ; + printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus); writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; } } @@ -1349,11 +1350,11 @@ static int xl_close(struct net_device *dev) writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD); if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { - printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ; + printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name); } else { writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; if (readb(xl_mmio + MMIO_MACDATA)==0) { - printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ; + printk(KERN_INFO "%s: Adapter has been closed\n",dev->name); writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; xl_freemem(dev) ; @@ -1390,7 +1391,7 @@ static int xl_close(struct net_device *dev) static void xl_set_rx_mode(struct net_device *dev) { struct xl_private *xl_priv = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[4] ; u16 options ; @@ -1407,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev) dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; } if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ @@ -1446,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev) printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; break ; case 4: - printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ; + printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this 
command\n",dev->name,srb_cmd); break ; case 6: - printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ; + printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd); break ; case 0: /* Successful command execution */ @@ -1471,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev) break ; case SET_FUNC_ADDRESS: if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ; + printk(KERN_INFO "%s: Functional Address Set\n",dev->name); break ; case CLOSE_NIC: if(xl_priv->xl_message_level) - printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ; + printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name); break ; case SET_MULTICAST_MODE: if(xl_priv->xl_message_level) @@ -1484,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev) case SET_RECEIVE_MODE: if(xl_priv->xl_message_level) { if (xl_priv->xl_copy_all_options == 0x0004) - printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ; + printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name); else - printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ; + printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name); } break ; @@ -1556,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev) xl_freemem(dev) ; free_irq(dev->irq,dev); - printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; + printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); } /* If serious error */ if (xl_priv->xl_message_level) { if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing \n",dev->name); + printk(KERN_INFO "%s: Beaconing\n",dev->name); if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); if (lan_status_diff & LSC_TRAN_BCN) printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); if (lan_status_diff & LSC_RING_REC) printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); if (lan_status_diff & LSC_FDX_MODE) @@ -1578,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev) if (lan_status_diff & LSC_CO) { if (xl_priv->xl_message_level) - printk(KERN_INFO "%s: Counter Overflow \n", dev->name); + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); /* Issue READ.LOG command */ xl_srb_cmd(dev, READ_LOG) ; } @@ -1594,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev) } /* Lan.change.status */ else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */ #if XL_DEBUG - printk(KERN_INFO "Received.Data \n") ; + printk(KERN_INFO "Received.Data\n"); #endif writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ; @@ -1629,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev) xl_asb_cmd(dev) ; } else { - printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ; + printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd); } /* Acknowledge the arb 
interrupt */ @@ -1686,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev) ret_code = readb(xl_mmio + MMIO_MACDATA) ; switch (ret_code) { case 0x01: - printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ; + printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name); break ; case 0x26: - printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ; + printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name); break ; case 0x40: - printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ; + printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name); break ; } xl_priv->asb_queued = 0 ; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 1a0967246e2..eebdaae2432 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev) static void tok_set_multicast_list(struct net_device *dev) { struct tok_info *ti = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; unsigned char address[4]; int i; @@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev) /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; address[0] = address[1] = address[2] = address[3] = 0; - netdev_for_each_mc_addr(mclist, dev) { - address[0] |= mclist->dmi_addr[2]; - address[1] |= mclist->dmi_addr[3]; - address[2] |= mclist->dmi_addr[4]; - address[3] |= mclist->dmi_addr[5]; + netdev_for_each_mc_addr(ha, dev) { + address[0] |= ha->addr[2]; + address[1] |= ha->addr[3]; + address[2] |= ha->addr[4]; + address[3] |= ha->addr[5]; } SET_PAGE(ti->srb_page); for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index dd028fee9dc..5bd14070453 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -121,6 +121,7 @@ #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/jiffies.h> +#include <linux/slab.h> #include <net/net_namespace.h> #include <net/checksum.h> @@ -357,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev, pcr |= PCI_COMMAND_SERR; pci_write_config_word (pdev, PCI_COMMAND, pcr); - printk("%s \n", version); + printk("%s\n", version); printk("%s: %s. 
I/O at %hx, MMIO at %p, using irq %d\n",dev->name, streamer_priv->streamer_card_name, (unsigned int) dev->base_addr, @@ -650,7 +651,7 @@ static int streamer_open(struct net_device *dev) #if STREAMER_DEBUG writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA); - printk("srb open request: \n"); + printk("srb open request:\n"); for (i = 0; i < 16; i++) { printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); } @@ -700,7 +701,7 @@ static int streamer_open(struct net_device *dev) if (srb_word != 0) { if (srb_word == 0x07) { if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */ - printk(KERN_WARNING "%s: Retrying at different ring speed \n", + printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name); open_finished = 0; } else { @@ -716,7 +717,7 @@ static int streamer_open(struct net_device *dev) ((error_code & 0x0f) == 0x0d)) { printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name); - printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name); + printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name); free_irq(dev->irq, dev); return -EIO; } @@ -922,7 +923,7 @@ static void streamer_rx(struct net_device *dev) if (rx_desc->status & 0x7E830000) { /* errors */ if (streamer_priv->streamer_message_level) { - printk(KERN_WARNING "%s: Rx Error %x \n", + printk(KERN_WARNING "%s: Rx Error %x\n", dev->name, rx_desc->status); } } else { /* received without errors */ @@ -935,7 +936,7 @@ static void streamer_rx(struct net_device *dev) if (skb == NULL) { - printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); + printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name); dev->stats.rx_dropped++; } else { /* we allocated an skb OK */ if (buffer_cnt == 1) { @@ -1266,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev) netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 options = 0; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[5]; writel(streamer_priv->srb, streamer_mmio + LAPA); @@ -1302,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev) writel(streamer_priv->srb,streamer_mmio+LAPA); dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; } writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); @@ -1363,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev) case 0x00: break; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name); + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); @@ -1391,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev) case 0x00: break; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); break; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", 
dev->name); break; case 0x39: /* Must deal with this if individual multicast addresses used */ - printk(KERN_INFO "%s: Group address not found \n", dev->name); + printk(KERN_INFO "%s: Group address not found\n", dev->name); break; default: break; @@ -1413,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev) switch (srb_word) { case 0x00: if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name); + printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name); break; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); break; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); @@ -1447,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev) } break; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); break; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); @@ -1466,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev) printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name); break; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name); break; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); @@ -1555,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev) (streamer_mmio + LAPDINC))); } - printk("next %04x, fs %02x, len %04x \n", next, + printk("next %04x, fs %02x, len %04x\n", next, status, len); } #endif @@ -1592,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev) mac_frame->protocol = tr_type_trans(mac_frame, dev); #if STREAMER_NETWORK_MONITOR - printk(KERN_WARNING "%s: Received MAC Frame, details: \n", + printk(KERN_WARNING "%s: Received MAC Frame, details:\n", dev->name); mac_hdr = tr_hdr(mac_frame); printk(KERN_WARNING @@ -1668,15 +1669,15 @@ drop_frame: /* If serious error */ if (streamer_priv->streamer_message_level) { if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected \n", dev->name); + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing \n", dev->name); + printk(KERN_INFO "%s: Beaconing\n", dev->name); if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name); + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); if (lan_status_diff & LSC_TRAN_BCN) printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); if (lan_status_diff & LSC_RING_REC) printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name); if (lan_status_diff & LSC_FDX_MODE) @@ -1685,7 +1686,7 @@ drop_frame: if (lan_status_diff & LSC_CO) { if (streamer_priv->streamer_message_level) - printk(KERN_INFO "%s: Counter Overflow \n", dev->name); + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); /* Issue READ.LOG command */ @@ -1715,7 +1716,7 @@ drop_frame: streamer_priv->streamer_lan_status = lan_status; } /* Lan.change.status */ else - printk(KERN_WARNING "%s: Unknown 
arb command \n", dev->name); + printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); } static void streamer_asb_bh(struct net_device *dev) @@ -1746,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev) rc=ntohs(readw(streamer_mmio+LAPD)) >> 8; switch (rc) { case 0x01: - printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); break; case 0x26: - printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); break; case 0xFF: /* Valid response, everything should be ok again */ diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c index 456f8bff40b..53f631ebb16 100644 --- a/drivers/net/tokenring/madgemc.c +++ b/drivers/net/tokenring/madgemc.c @@ -21,6 +21,7 @@ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n"; #include <linux/module.h> #include <linux/mca.h> +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 3a25e0434ae..3d2fbe60b46 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev) olympic_priv=netdev_priv(dev); olympic_mmio=olympic_priv->olympic_mmio; - printk("%s \n", version); + printk("%s\n", version); printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq); writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL); @@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev) #if OLYMPIC_DEBUG printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK)); - printk("Before the open command \n"); + printk("Before the open command\n"); #endif do { memset_io(init_srb,0,SRB_COMMAND_SIZE); @@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev) break; } if (time_after(jiffies, t + 10*HZ)) { - printk(KERN_WARNING "%s: SRB timed out. 
\n",dev->name) ; + printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); olympic_priv->srb_queued=0; break ; } @@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev) break; case 0x07: if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */ - printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name); + printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name); open_finished = 0 ; continue; } @@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev) if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) { printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name); - printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name); + printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name); } else { printk(KERN_WARNING "%s: %s - %s\n", dev->name, open_maj_error[(err & 0xf0) >> 4], @@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev) olympic_priv->rx_status_last_received++ ; olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1); #if OLYMPIC_DEBUG - printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); + printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); #endif length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff; buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; @@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev) if (l_status_buffercnt & 0x3B000000) { if (olympic_priv->olympic_message_level) { if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */ - printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name); + printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name); if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */ - printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name); + printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name); if (l_status_buffercnt & (1<<27)) /* No receive buffers */ - printk(KERN_WARNING "%s: No receive buffers \n",dev->name); + printk(KERN_WARNING "%s: No receive buffers\n",dev->name); if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */ - printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name); + printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name); if (l_status_buffercnt & (1<<24)) /* Received Error Detect */ - printk(KERN_WARNING "%s: Received Error Detect \n",dev->name); + printk(KERN_WARNING "%s: Received Error Detect\n",dev->name); } olympic_priv->rx_ring_last_received += i ; olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; @@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev) } if (skb == NULL) { - printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; + printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ; dev->stats.rx_dropped++; /* Update counters even though we don't transfer the frame */ olympic_priv->rx_ring_last_received += i ; @@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev) } if (t == 0) { - printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ; + printk(KERN_WARNING "%s: SRB timed out. 
May not be fatal.\n",dev->name); } olympic_priv->srb_queued=0; } @@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev) u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 options = 0; u8 __iomem *srb; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[4] ; writel(olympic_priv->srb,olympic_mmio+LAPA); @@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev) dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; } writeb(SRB_SET_FUNC_ADDRESS,srb+0); @@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev) case 0x00: break ; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break ; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); @@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev) case 0x00: break ; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break ; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; break ; case 0x39: /* Must deal with this if individual multicast addresses used */ - printk(KERN_INFO "%s: Group address not found \n",dev->name); + printk(KERN_INFO "%s: Group address not found\n",dev->name); break ; default: break ; @@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev) switch (readb(srb+2)) { case 0x00: if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ; + printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name); break ; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break ; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; @@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev) printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; break ; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break ; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; @@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev) printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; break ; case 0x01: - printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; + printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name); break ; case 0x04: printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; @@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev) printk("Loc %d = %02x\n",i,readb(frame_data + i)); } - printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct 
mac_receive_buffer,buffer_length))); + printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); } #endif mac_frame = dev_alloc_skb(frame_len) ; @@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev) if (olympic_priv->olympic_network_monitor) { struct trh_hdr *mac_hdr; - printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); + printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name); mac_hdr = tr_hdr(mac_frame); printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n", dev->name, mac_hdr->daddr); @@ -1489,20 +1489,20 @@ drop_frame: writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); netif_stop_queue(dev); olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; - printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; + printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name); } /* If serious error */ if (olympic_priv->olympic_message_level) { if (lan_status_diff & LSC_SIG_LOSS) - printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; + printk(KERN_WARNING "%s: No receive signal detected\n", dev->name); if (lan_status_diff & LSC_HARD_ERR) - printk(KERN_INFO "%s: Beaconing \n",dev->name); + printk(KERN_INFO "%s: Beaconing\n",dev->name); if (lan_status_diff & LSC_SOFT_ERR) - printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); + printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); if (lan_status_diff & LSC_TRAN_BCN) printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); if (lan_status_diff & LSC_SS) - printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); + printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); if (lan_status_diff & LSC_RING_REC) printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); if (lan_status_diff & LSC_FDX_MODE) @@ -1512,7 +1512,7 @@ drop_frame: if (lan_status_diff & LSC_CO) { if (olympic_priv->olympic_message_level) - printk(KERN_INFO "%s: Counter Overflow \n", dev->name); + printk(KERN_INFO "%s: Counter Overflow\n", dev->name); /* Issue READ.LOG command */ @@ -1551,7 +1551,7 @@ drop_frame: } /* Lan.change.status */ else - printk(KERN_WARNING "%s: Unknown arb command \n", dev->name); + printk(KERN_WARNING "%s: Unknown arb command\n", dev->name); } static void olympic_asb_bh(struct net_device *dev) @@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev) if (olympic_priv->asb_queued == 2) { switch (readb(asb_block+2)) { case 0x01: - printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name); break ; case 0x26: - printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); + printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name); break ; case 0xFF: /* Valid response, everything should be ok again */ diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index 5401d86a7be..e40560137c4 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -36,7 +36,6 @@ #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/time.h> #include <linux/errno.h> diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 21a01753312..c169fd05dde 100644 
--- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -85,7 +85,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/time.h> #include <linux/errno.h> @@ -693,7 +692,7 @@ static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb, * NOTE: This function should be used whenever the status of any TPL must be * modified by the driver, because the compiler may otherwise change the * order of instructions such that writing the TPL status may be executed at - * an undesireable time. When this function is used, the status is always + * an undesirable time. When this function is used, the status is always * written when the function is called. */ static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status) @@ -1212,17 +1211,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev) } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { ((char *)(&tp->ocpl.FunctAddr))[0] |= - mclist->dmi_addr[2]; + ha->addr[2]; ((char *)(&tp->ocpl.FunctAddr))[1] |= - mclist->dmi_addr[3]; + ha->addr[3]; ((char *)(&tp->ocpl.FunctAddr))[2] |= - mclist->dmi_addr[4]; + ha->addr[4]; ((char *)(&tp->ocpl.FunctAddr))[3] |= - mclist->dmi_addr[5]; + ha->addr[5]; } } tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); @@ -1391,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev) Status &= STS_MASK; if(tms380tr_debug > 3) - printk(KERN_DEBUG " %04X \n", Status); + printk(KERN_DEBUG " %04X\n", Status); /* BUD successfully completed */ if(Status == STS_INITIALIZE) return (1); @@ -1847,7 +1846,7 @@ static void tms380tr_chk_irq(struct net_device *dev) break; case DMA_WRITE_ABORT: - printk(KERN_INFO "%s: DMA write operation aborted: \n", + printk(KERN_INFO "%s: DMA write operation aborted:\n", dev->name); switch (AdapterCheckBlock[1]) { @@ -2264,7 +2263,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) * This function should be used whenever the status of any RPL must be * modified by the driver, because the compiler may otherwise change the * order of instructions such that writing the RPL status may be executed - * at an undesireable time. When this function is used, the status is + * at an undesirable time. When this function is used, the status is * always written when the function is called. 
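Most of the multicast-filter hunks in this patch are the same mechanical conversion: the old struct dev_mc_list walk using dmi->dmi_addr becomes netdev_for_each_mc_addr() over struct netdev_hw_addr, with the address now read from ha->addr. A minimal sketch of the new-style loop, assuming the 64-bit hash layout and the crc >> 26 bucketing that the dm9601 and mcs7830 hunks further down use (the function and parameter names here are illustrative, not part of the patch):

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Illustrative only: fold the device's multicast list into a 64-bit
 * hash filter, one bit per CRC bucket, using the new iterator. */
static void example_build_mc_hash(struct net_device *net, u8 hashes[8])
{
	struct netdev_hw_addr *ha;

	memset(hashes, 0, 8);
	netdev_for_each_mc_addr(ha, net) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;

		hashes[crc >> 3] |= 1 << (crc & 0x7);
	}
}

The perfect-filter cases (de2104x, tulip_core, de4x5) follow the same substitution: only the list type and the field holding the address change, the filter arithmetic is untouched.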
*/ static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 647cdd1d4e2..a03730bd1da 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -38,7 +38,6 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/crc32.h> @@ -48,6 +47,7 @@ #include <linux/rtnetlink.h> #include <linux/timer.h> #include <linux/platform_device.h> +#include <linux/gfp.h> #include <asm/system.h> #include <asm/io.h> @@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data, return; udelay(10); } - printk(KERN_ERR "%s function time out \n", __func__); + printk(KERN_ERR "%s function time out\n", __func__); } static int mii_speed(struct mii_if_info *mii) @@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev) if (i == 0) { data->txring[tx].buf0 = dma_map_single(NULL, skb->data, - skb->len - skb->data_len, DMA_TO_DEVICE); - data->txring[tx].len = skb->len - skb->data_len; + skb_headlen(skb), DMA_TO_DEVICE); + data->txring[tx].len = skb_headlen(skb); misc |= TSI108_TX_SOF; } else { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; @@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev) return; udelay(10); } - printk(KERN_ERR "%s function time out \n", __func__); + printk(KERN_ERR "%s function time out\n", __func__); } static void tsi108_reset_ether(struct tsi108_prv_data * data) @@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { int i; - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; memset(data->mc_hash, 0, sizeof(data->mc_hash)); - netdev_for_each_mc_addr(mc, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 hash, crc; - crc = ether_crc(6, mc->dmi_addr); + crc = ether_crc(6, ha->addr); hash = crc >> 23; __set_bit(hash, &data->mc_hash[0]); } @@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev) udelay(10); } if (i == 0) - printk(KERN_ERR "%s function time out \n", __func__); + printk(KERN_ERR "%s function time out\n", __func__); if (data->phy_type == TSI108_PHY_BCM54XX) { tsi108_write_mii(data, 0x09, 0x0300); diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index cb429723b2c..9c0f29ce8ba 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -42,6 +42,7 @@ #include <linux/compiler.h> #include <linux/rtnetlink.h> #include <linux/crc32.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> @@ -670,15 +671,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); u16 hash_table[32]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. 
*/ - netdev_for_each_mc_addr(mclist, dev) { - int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; set_bit_le(index, hash_table); } @@ -699,13 +700,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (u16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index c4ecb9a9540..d818456f471 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -450,7 +450,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/pci.h> #include <linux/eisa.h> #include <linux/delay.h> @@ -467,6 +466,7 @@ #include <linux/dma-mapping.h> #include <linux/moduleparam.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/dma.h> @@ -1951,7 +1951,7 @@ static void SetMulticastFilter(struct net_device *dev) { struct de4x5_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u_long iobase = dev->base_addr; int i, bit, byte; u16 hashcode; @@ -1966,8 +1966,8 @@ SetMulticastFilter(struct net_device *dev) if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { omr |= OMR_PM; /* Pass all multicasts */ } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? */ crc = ether_crc_le(ETH_ALEN, addrs); hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ @@ -1983,8 +1983,8 @@ SetMulticastFilter(struct net_device *dev) } } } else { /* Perfect filtering */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; for (i=0; i<ETH_ALEN; i++) { *(pa + (i&1)) = *addrs++; if (i & 0x01) pa += 4; @@ -5077,7 +5077,7 @@ mii_get_phy(struct net_device *dev) lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ lp->mii_cnt++; lp->active++; - printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name); + printk("%s: Using generic MII device control. 
If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); j = de4x5_debug; de4x5_debug |= DEBUG_MII; de4x5_dbg_mii(dev, k); @@ -5337,7 +5337,7 @@ de4x5_dbg_open(struct net_device *dev) } } printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); - printk("Ring size: \nRX: %d\nTX: %d\n", + printk("Ring size:\nRX: %d\nTX: %d\n", (short)lp->rxRingSize, (short)lp->txRingSize); } diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 95b38d803e9..7278ecb823c 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -74,7 +74,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -1454,7 +1453,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) static void dm9132_id_table(struct DEVICE *dev) { - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u16 * addrptr; unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ u32 hash_val; @@ -1478,8 +1477,8 @@ static void dm9132_id_table(struct DEVICE *dev) hash_table[3] = 0x8000; /* the multicast address in Hash Table : 64 bits */ - netdev_for_each_mc_addr(mcptr, dev) { - hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } @@ -1497,7 +1496,7 @@ static void dm9132_id_table(struct DEVICE *dev) static void send_filter_frame(struct DEVICE *dev) { struct dmfe_board_info *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; struct tx_desc *txptr; u16 * addrptr; u32 * suptr; @@ -1520,8 +1519,8 @@ static void send_filter_frame(struct DEVICE *dev) *suptr++ = 0xffff; /* fit the multicast address */ - netdev_for_each_mc_addr(mcptr, dev) { - addrptr = (u16 *) mcptr->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; *suptr++ = addrptr[0]; *suptr++ = addrptr[1]; *suptr++ = addrptr[2]; diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c index 93f4e8309f8..6002e651b9e 100644 --- a/drivers/net/tulip/eeprom.c +++ b/drivers/net/tulip/eeprom.c @@ -13,6 +13,7 @@ */ #include <linux/pci.h> +#include <linux/slab.h> #include "tulip.h" #include <linux/init.h> #include <asm/unaligned.h> @@ -143,6 +144,12 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) void __devinit tulip_parse_eeprom(struct net_device *dev) { + /* + dev is not registered at this point, so logging messages can't + use dev_<level> or netdev_<level> but dev->name is good via a + hack in the caller + */ + /* The last media info list parsed, for multiport boards. */ static struct mediatable *last_mediatable; static unsigned char *last_ee_data; @@ -161,15 +168,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev) if (ee_data[0] == 0xff) { if (last_mediatable) { controller_index++; - dev_info(&dev->dev, - "Controller %d of multiport board\n", - controller_index); + pr_info("%s: Controller %d of multiport board\n", + dev->name, controller_index); tp->mtable = last_mediatable; ee_data = last_ee_data; goto subsequent_board; } else - dev_info(&dev->dev, - "Missing EEPROM, this interface may not work correctly!\n"); + pr_info("%s: Missing EEPROM, this interface may not work correctly!\n", + dev->name); return; } /* Do a fix-up based on the vendor half of the station address prefix. 
*/ @@ -181,15 +187,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev) i++; /* An Accton EN1207, not an outlaw Maxtech. */ memcpy(ee_data + 26, eeprom_fixups[i].newtable, sizeof(eeprom_fixups[i].newtable)); - dev_info(&dev->dev, - "Old format EEPROM on '%s' board. Using substitute media control info\n", - eeprom_fixups[i].name); + pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n", + dev->name, eeprom_fixups[i].name); break; } } if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ - dev_info(&dev->dev, - "Old style EEPROM with no media selection information\n"); + pr_info("%s: Old style EEPROM with no media selection information\n", + dev->name); return; } } @@ -217,8 +222,8 @@ subsequent_board: /* there is no phy information, don't even try to build mtable */ if (count == 0) { if (tulip_debug > 0) - dev_warn(&dev->dev, - "no phy info, aborting mtable build\n"); + pr_warning("%s: no phy info, aborting mtable build\n", + dev->name); return; } @@ -234,8 +239,10 @@ subsequent_board: mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; mtable->csr15dir = mtable->csr15val = 0; - dev_info(&dev->dev, "EEPROM default media type %s\n", - media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]); + pr_info("%s: EEPROM default media type %s\n", + dev->name, + media & 0x0800 ? "Autosense" + : medianame[media & MEDIA_MASK]); for (i = 0; i < count; i++) { struct medialeaf *leaf = &mtable->mleaf[i]; @@ -298,17 +305,17 @@ subsequent_board: } if (tulip_debug > 1 && leaf->media == 11) { unsigned char *bp = leaf->leafdata; - dev_info(&dev->dev, - "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n", - bp[0], bp[1], bp[2 + bp[1]*2], - bp[5 + bp[2 + bp[1]*2]*2], - bp[4 + bp[2 + bp[1]*2]*2]); + pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n", + dev->name, + bp[0], bp[1], bp[2 + bp[1]*2], + bp[5 + bp[2 + bp[1]*2]*2], + bp[4 + bp[2 + bp[1]*2]*2]); } - dev_info(&dev->dev, - "Index #%d - Media %s (#%d) described by a %s (%d) block\n", - i, medianame[leaf->media & 15], leaf->media, - leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", - leaf->type); + pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n", + dev->name, + i, medianame[leaf->media & 15], leaf->media, + leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", + leaf->type); } if (new_advertise) tp->sym_advertise = new_advertise; diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 7f544ef2f5f..22e766e9006 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include <linux/slab.h> #include "tulip.h" #include <linux/init.h> #include <linux/etherdevice.h> @@ -990,15 +991,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); u16 hash_table[32]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. 
*/ - netdev_for_each_mc_addr(mclist, dev) { - int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; set_bit_le(index, hash_table); } @@ -1018,13 +1019,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (u16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; @@ -1061,7 +1062,7 @@ static void set_rx_mode(struct net_device *dev) } else if (tp->flags & MC_HASH_ONLY) { /* Some work-alikes have only a 64-entry hash filter table. */ /* Should verify correctness on big-endian/__powerpc__ */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; if (netdev_mc_count(dev) > 64) { /* Arbitrary non-effective limit. */ tp->csr6 |= AcceptAllMulticast; @@ -1069,18 +1070,21 @@ static void set_rx_mode(struct net_device *dev) } else { u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ int filterbit; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (tp->flags & COMET_MAC_ADDR) - filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr); + filterbit = ether_crc_le(ETH_ALEN, + ha->addr); else - filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + filterbit = ether_crc(ETH_ALEN, + ha->addr) >> 26; filterbit &= 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); if (tulip_debug > 2) dev_info(&dev->dev, "Added filter for %pM %08x bit %d\n", - mclist->dmi_addr, - ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); + ha->addr, + ether_crc(ETH_ALEN, ha->addr), + filterbit); } if (mc_filter[0] == tp->mc_filter[0] && mc_filter[1] == tp->mc_filter[1]) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 0ab05af237e..c7f97285292 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -25,7 +25,6 @@ #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> @@ -851,13 +850,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info if ( !(rdes0 & 0x8000) || ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { + struct sk_buff *new_skb = NULL; + skb = rxptr->rx_skb_ptr; /* Good packet, send to upper layer */ /* Shorst packet used new SKB */ - if ( (rxlen < RX_COPY_SIZE) && - ( (skb = dev_alloc_skb(rxlen + 2) ) - != NULL) ) { + if ((rxlen < RX_COPY_SIZE) && + (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) { + skb = new_skb; /* size less than COPY_SIZE, allocate a rxlen SKB */ skb_reserve(skb, 2); /* 16byte align */ memcpy(skb_put(skb, rxlen), @@ -1392,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) static void send_filter_frame(struct net_device *dev, int mc_cnt) { struct uli526x_board_info *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; struct tx_desc *txptr; u16 * addrptr; u32 * suptr; @@ -1415,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) *suptr++ = 
0xffff << FLT_SHIFT; /* fit the multicast address */ - netdev_for_each_mc_addr(mcptr, dev) { - addrptr = (u16 *) mcptr->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; *suptr++ = addrptr[0] << FLT_SHIFT; *suptr++ = addrptr[1] << FLT_SHIFT; *suptr++ = addrptr[2] << FLT_SHIFT; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 304f43866c4..18c8cedf4ca 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -114,7 +114,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -1367,13 +1366,15 @@ static u32 __set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; - filterbit &= 0x3f; - mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); + netdev_for_each_mc_addr(ha, dev) { + int filbit; + + filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; + filbit &= 0x3f; + mc_filter[filbit >> 5] |= 1 << (filbit & 31); } rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index acfeeb98056..a439e93be22 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance) #ifdef DEBUG print_binary(status); - printk("tx status 0x%08x 0x%08x \n", + printk("tx status 0x%08x 0x%08x\n", card->tx_buffer[0], card->tx_buffer[4]); - printk("rx status 0x%08x 0x%08x \n", + printk("rx status 0x%08x 0x%08x\n", card->rx_buffer[0], card->rx_buffer[4]); #endif /* Handle shared irq and hotplug */ @@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev) struct xircom_private *xp = netdev_priv(dev); int retval; enter("xircom_open"); - pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n", + pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n", dev->name, dev->irq); retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); if (retval) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ce1efa4c0b0..43265207d46 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) } } + /* Orphan the skb - required as we might hang on to it + * for indefinite time. */ + skb_orphan(skb); + /* Enqueue packet */ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); dev->trans_start = jiffies; @@ -1437,7 +1441,7 @@ static int tun_chr_close(struct inode *inode, struct file *file) __tun_detach(tun); - /* If desireable, unregister the netdevice. */ + /* If desirable, unregister the netdevice. 
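The tun_net_xmit() change above is one of the few non-mechanical edits in this patch: skb_orphan() is called before the packet is queued to the character device, because the skb may sit on tun's receive queue indefinitely until userspace reads it, and orphaning releases the sending socket's send-buffer accounting so that socket is not pinned for the lifetime of the queued packet. A rough sketch of the shape of such a transmit path (the names below are illustrative, not the actual tun code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: drop the sender's accounting before parking the
 * skb on a queue that may hold it for an arbitrarily long time. */
static netdev_tx_t example_queue_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct sk_buff_head *queue)
{
	/* Release the owning socket's truesize charge; the skb can now
	 * outlive the sender without blocking its sndbuf. */
	skb_orphan(skb);

	skb_queue_tail(queue, skb);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}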
*/ if (!(tun->flags & TUN_PERSIST)) { rtnl_lock(); if (dev->reg_state == NETREG_REGISTERED) diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 1cf012d3e07..b94c4cce93c 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -109,7 +109,6 @@ static const int multicast_filter_limit = 32; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> @@ -921,11 +920,11 @@ typhoon_set_rx_mode(struct net_device *dev) /* Too many to match, or accept all multicasts. */ filter |= TYPHOON_RX_FILTER_ALL_MCAST; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit >> 5] |= 1 << (bit & 0x1f); } @@ -2098,7 +2097,7 @@ typhoon_tx_timeout(struct net_device *dev) if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { netdev_warn(dev, "could not reset in tx timeout\n"); - goto truely_dead; + goto truly_dead; } /* If we ever start using the Hi ring, it will need cleaning too */ @@ -2107,13 +2106,13 @@ typhoon_tx_timeout(struct net_device *dev) if(typhoon_start_runtime(tp) < 0) { netdev_err(dev, "could not start runtime in tx timeout\n"); - goto truely_dead; + goto truly_dead; } netif_wake_queue(dev); return; -truely_dead: +truly_dead: /* Reset the hardware, and turn off carrier to avoid more timeouts */ typhoon_reset(tp->ioaddr, NoWait); netif_carrier_off(dev); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 23a97518bc1..081f76bff34 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -430,7 +430,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); /* Ethernet frames are defined in Little Endian mode, - therefor to insert */ + therefore to insert */ /* the address to the hash (Big Endian mode), we reverse the bytes.*/ set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); @@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) static void ucc_geth_set_multi(struct net_device *dev) { struct ucc_geth_private *ugeth; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct ucc_fast __iomem *uf_regs; struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; @@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev) out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now. */ - if (!(dmi->dmi_addr[0] & 1)) + if (!(ha->addr[0] & 1)) continue; /* Ask CPM to run CRC and set bit in * filter mask. 
*/ - hw_add_addr_in_hash(ugeth, dmi->dmi_addr); + hw_add_addr_in_hash(ugeth, ha->addr); } } } @@ -3883,7 +3883,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma } if (netif_msg_probe(&debug)) - printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", + printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n", ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, ug_info->uf_info.irq); diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 7075f26e97d..6f92e48f02d 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -18,7 +18,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> -#include <linux/slab.h> #include <linux/stddef.h> #include <linux/interrupt.h> #include <linux/netdevice.h> diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 32d93564a74..ba56ce4382d 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -204,6 +204,14 @@ config USB_NET_DM9601 This option adds support for Davicom DM9601 based USB 1.1 10/100 Ethernet adapters. +config USB_NET_SMSC75XX + tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" + depends on USB_USBNET + select CRC32 + help + This option adds support for SMSC LAN95XX based USB 2.0 + Gigabit Ethernet adapters. + config USB_NET_SMSC95XX tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices" depends on USB_USBNET diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index e17afb78f37..82ea62955b5 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_USB_NET_AX8817X) += asix.o obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o obj-$(CONFIG_USB_NET_DM9601) += dm9601.o +obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o obj-$(CONFIG_USB_NET_GL620A) += gl620a.o obj-$(CONFIG_USB_NET_NET1080) += net1080.o diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 20e34608fa4..8e7d2374558 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -34,6 +34,7 @@ #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> +#include <linux/slab.h> #define DRIVER_VERSION "14-Jun-2006" static const char driver_name [] = "asix"; @@ -54,6 +55,7 @@ static const char driver_name [] = "asix"; #define AX_CMD_WRITE_IPG0 0x12 #define AX_CMD_WRITE_IPG1 0x13 #define AX_CMD_READ_NODE_ID 0x13 +#define AX_CMD_WRITE_NODE_ID 0x14 #define AX_CMD_WRITE_IPG2 0x14 #define AX_CMD_WRITE_MULTI_FILTER 0x16 #define AX88172_CMD_READ_NODE_ID 0x17 @@ -165,6 +167,7 @@ static const char driver_name [] = "asix"; /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ struct asix_data { u8 multi_filter[AX_MCAST_FILTER_SIZE]; + u8 mac_addr[ETH_ALEN]; u8 phymode; u8 ledmode; u8 eeprom_len; @@ -555,16 +558,14 @@ static void asix_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); /* Build the multicast hash filter. 
*/ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = - ether_crc(ETH_ALEN, - mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } @@ -732,6 +733,30 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd) return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } +static int asix_set_mac_address(struct net_device *net, void *p) +{ + struct usbnet *dev = netdev_priv(net); + struct asix_data *data = (struct asix_data *)&dev->data; + struct sockaddr *addr = p; + + if (netif_running(net)) + return -EBUSY; + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); + + /* We use the 20 byte dev->data + * for our 6 byte mac buffer + * to avoid allocating memory that + * is tricky to free later */ + memcpy(data->mac_addr, addr->sa_data, ETH_ALEN); + asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN, + data->mac_addr); + + return 0; +} + /* We need to override some ethtool_ops so we require our own structure so we don't interfere with other usbnet devices that may be connected at the same time. */ @@ -767,16 +792,14 @@ static void ax88172_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); /* Build the multicast hash filter. */ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = - ether_crc(ETH_ALEN, - mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } @@ -919,7 +942,7 @@ static const struct net_device_ops ax88772_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = asix_ioctl, .ndo_set_multicast_list = asix_set_multicast, @@ -1213,7 +1236,7 @@ static const struct net_device_ops ax88178_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_multicast_list = asix_set_multicast, .ndo_do_ioctl = asix_ioctl, diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 96f1ebe0d34..97687d33590 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -36,7 +36,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> -#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> @@ -44,6 +43,7 @@ #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> +#include <linux/gfp.h> #include <asm/uaccess.h> #undef DEBUG @@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast) static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u8 broadcast[6]; u8 rx = RxEnable | RxPolarity | RxMultiCast; @@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev) if (netdev->flags & IFF_ALLMULTI) { 
memset(catc->multicast, 0xff, 64); } else { - netdev_for_each_mc_addr(mc, netdev) { - u32 crc = ether_crc_le(6, mc->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 6491c9c00c8..dc9444525b4 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/gfp.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/netdevice.h> diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index a4a85a6ed86..5f3b97668e6 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -30,6 +30,7 @@ #include <linux/crc32.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> +#include <linux/gfp.h> /* diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 269339769f4..291add25524 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -21,6 +21,7 @@ #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> +#include <linux/slab.h> /* datasheet: http://ptm2.cc.utu.fi/ftp/network/cards/DM9601/From_NET/DM9601-DS-P01-930914.pdf @@ -386,10 +387,10 @@ static void dm9601_set_multicast(struct net_device *net) netdev_mc_count(net) > DM_MAX_MCAST) { rx_ctl |= 0x04; } else if (!netdev_mc_empty(net)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_list, net) { - u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26; hashes[crc >> 3] |= 1 << (crc & 0x7); } } diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index f7ccfad9384..dcd57c37ef7 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -30,6 +30,7 @@ #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> +#include <linux/gfp.h> /* diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 6895f153123..be0cc99e881 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1155,9 +1155,6 @@ static void _hso_serial_set_termios(struct tty_struct *tty, static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) { int result; -#ifdef CONFIG_HSO_AUTOPM - usb_mark_last_busy(urb->dev); -#endif /* We are done with this URB, resubmit it. 
Prep the USB to wait for * another frame */ usb_fill_bulk_urb(urb, serial->parent->usb, diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c index 3c228df5706..be02a25da71 100644 --- a/drivers/net/usb/int51x1.c +++ b/drivers/net/usb/int51x1.c @@ -29,6 +29,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/slab.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 70978219e98..834d8cd3005 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -44,6 +44,7 @@ #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> @@ -452,12 +453,12 @@ static void mcs7830_data_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; /* Build the multicast hash filter. */ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } } diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index bdcad45954a..961a8ed38d8 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -29,6 +29,7 @@ #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> +#include <linux/slab.h> #include <asm/unaligned.h> diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 4ce331fb1e1..dd8a4adf48c 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -22,6 +22,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> +#include <linux/slab.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c new file mode 100644 index 00000000000..753ee6eb7ed --- /dev/null +++ b/drivers/net/usb/smsc75xx.c @@ -0,0 +1,1289 @@ + /*************************************************************************** + * + * Copyright (C) 2007-2010 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + *****************************************************************************/ + +#include <linux/module.h> +#include <linux/kmod.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/usb/usbnet.h> +#include <linux/slab.h> +#include "smsc75xx.h" + +#define SMSC_CHIPNAME "smsc75xx" +#define SMSC_DRIVER_VERSION "1.0.0" +#define HS_USB_PKT_SIZE (512) +#define FS_USB_PKT_SIZE (64) +#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) +#define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE) +#define DEFAULT_BULK_IN_DELAY (0x00002000) +#define MAX_SINGLE_PACKET_SIZE (9000) +#define LAN75XX_EEPROM_MAGIC (0x7500) +#define EEPROM_MAC_OFFSET (0x01) +#define DEFAULT_TX_CSUM_ENABLE (true) +#define DEFAULT_RX_CSUM_ENABLE (true) +#define DEFAULT_TSO_ENABLE (true) +#define SMSC75XX_INTERNAL_PHY_ID (1) +#define SMSC75XX_TX_OVERHEAD (8) +#define MAX_RX_FIFO_SIZE (20 * 1024) +#define MAX_TX_FIFO_SIZE (12 * 1024) +#define USB_VENDOR_ID_SMSC (0x0424) +#define USB_PRODUCT_ID_LAN7500 (0x7500) +#define USB_PRODUCT_ID_LAN7505 (0x7505) + +#define check_warn(ret, fmt, args...) \ + ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) + +#define check_warn_return(ret, fmt, args...) \ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) + +#define check_warn_goto_done(ret, fmt, args...) \ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } }) + +struct smsc75xx_priv { + struct usbnet *dev; + u32 rfe_ctl; + u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; + bool use_rx_csum; + struct mutex dataport_mutex; + spinlock_t rfe_ctl_lock; + struct work_struct set_multicast; +}; + +struct usb_context { + struct usb_ctrlrequest req; + struct usbnet *dev; +}; + +static int turbo_mode = true; +module_param(turbo_mode, bool, 0644); +MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); + +static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, + u32 *data) +{ + u32 *buf = kmalloc(4, GFP_KERNEL); + int ret; + + BUG_ON(!dev); + + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_READ_REGISTER, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 00, index, buf, 4, USB_CTRL_GET_TIMEOUT); + + if (unlikely(ret < 0)) + netdev_warn(dev->net, + "Failed to read register index 0x%08x", index); + + le32_to_cpus(buf); + *data = *buf; + kfree(buf); + + return ret; +} + +static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, + u32 data) +{ + u32 *buf = kmalloc(4, GFP_KERNEL); + int ret; + + BUG_ON(!dev); + + if (!buf) + return -ENOMEM; + + *buf = data; + cpu_to_le32s(buf); + + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_WRITE_REGISTER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 00, index, buf, 4, USB_CTRL_SET_TIMEOUT); + + if (unlikely(ret < 0)) + netdev_warn(dev->net, + "Failed to write register index 0x%08x", index); + + kfree(buf); + + return ret; +} + +/* Loop until the read is completed with timeout + * called with phy_mutex held */ +static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = smsc75xx_read_reg(dev, MII_ACCESS, &val); + check_warn_return(ret, "Error reading MII_ACCESS"); + + if (!(val & MII_ACCESS_BUSY)) + return 0; + } while (!time_after(jiffies, 
start_time + HZ)); + + return -EIO; +} + +static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) +{ + struct usbnet *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = smsc75xx_phy_wait_not_busy(dev); + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read"); + + /* set the address, index & direction (read from PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) + | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) + | MII_ACCESS_READ; + ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); + check_warn_goto_done(ret, "Error writing MII_ACCESS"); + + ret = smsc75xx_phy_wait_not_busy(dev); + check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx); + + ret = smsc75xx_read_reg(dev, MII_DATA, &val); + check_warn_goto_done(ret, "Error reading MII_DATA"); + + ret = (u16)(val & 0xFFFF); + +done: + mutex_unlock(&dev->phy_mutex); + return ret; +} + +static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, + int regval) +{ + struct usbnet *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = smsc75xx_phy_wait_not_busy(dev); + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write"); + + val = regval; + ret = smsc75xx_write_reg(dev, MII_DATA, val); + check_warn_goto_done(ret, "Error writing MII_DATA"); + + /* set the address, index & direction (write to PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) + | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) + | MII_ACCESS_WRITE; + ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); + check_warn_goto_done(ret, "Error writing MII_ACCESS"); + + ret = smsc75xx_phy_wait_not_busy(dev); + check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx); + +done: + mutex_unlock(&dev->phy_mutex); +} + +static int smsc75xx_wait_eeprom(struct usbnet *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = smsc75xx_read_reg(dev, E2P_CMD, &val); + check_warn_return(ret, "Error reading E2P_CMD"); + + if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) + break; + udelay(40); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { + netdev_warn(dev->net, "EEPROM read operation timeout"); + return -EIO; + } + + return 0; +} + +static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = smsc75xx_read_reg(dev, E2P_CMD, &val); + check_warn_return(ret, "Error reading E2P_CMD"); + + if (!(val & E2P_CMD_BUSY)) + return 0; + + udelay(40); + } while (!time_after(jiffies, start_time + HZ)); + + netdev_warn(dev->net, "EEPROM is busy"); + return -EIO; +} + +static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, + u8 *data) +{ + u32 val; + int i, ret; + + BUG_ON(!dev); + BUG_ON(!data); + + ret = smsc75xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + for (i = 0; i < length; i++) { + val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); + ret = smsc75xx_write_reg(dev, E2P_CMD, val); + check_warn_return(ret, "Error writing E2P_CMD"); + + ret = smsc75xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + ret = smsc75xx_read_reg(dev, E2P_DATA, &val); + check_warn_return(ret, 
"Error reading E2P_DATA"); + + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, + u8 *data) +{ + u32 val; + int i, ret; + + BUG_ON(!dev); + BUG_ON(!data); + + ret = smsc75xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + /* Issue write/erase enable command */ + val = E2P_CMD_BUSY | E2P_CMD_EWEN; + ret = smsc75xx_write_reg(dev, E2P_CMD, val); + check_warn_return(ret, "Error writing E2P_CMD"); + + ret = smsc75xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + for (i = 0; i < length; i++) { + + /* Fill data register */ + val = data[i]; + ret = smsc75xx_write_reg(dev, E2P_DATA, val); + check_warn_return(ret, "Error writing E2P_DATA"); + + /* Send "write" command */ + val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); + ret = smsc75xx_write_reg(dev, E2P_CMD, val); + check_warn_return(ret, "Error writing E2P_CMD"); + + ret = smsc75xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + offset++; + } + + return 0; +} + +static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) +{ + int i, ret; + + for (i = 0; i < 100; i++) { + u32 dp_sel; + ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); + check_warn_return(ret, "Error reading DP_SEL"); + + if (dp_sel & DP_SEL_DPRDY) + return 0; + + udelay(40); + } + + netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out"); + + return -EIO; +} + +static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, + u32 length, u32 *buf) +{ + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + u32 dp_sel; + int i, ret; + + mutex_lock(&pdata->dataport_mutex); + + ret = smsc75xx_dataport_wait_not_busy(dev); + check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry"); + + ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); + check_warn_goto_done(ret, "Error reading DP_SEL"); + + dp_sel &= ~DP_SEL_RSEL; + dp_sel |= ram_select; + ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); + check_warn_goto_done(ret, "Error writing DP_SEL"); + + for (i = 0; i < length; i++) { + ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); + check_warn_goto_done(ret, "Error writing DP_ADDR"); + + ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); + check_warn_goto_done(ret, "Error writing DP_DATA"); + + ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); + check_warn_goto_done(ret, "Error writing DP_CMD"); + + ret = smsc75xx_dataport_wait_not_busy(dev); + check_warn_goto_done(ret, "smsc75xx_dataport_write timeout"); + } + +done: + mutex_unlock(&pdata->dataport_mutex); + return ret; +} + +/* returns hash bit number for given MAC address */ +static u32 smsc75xx_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; +} + +static void smsc75xx_deferred_multicast_write(struct work_struct *param) +{ + struct smsc75xx_priv *pdata = + container_of(param, struct smsc75xx_priv, set_multicast); + struct usbnet *dev = pdata->dev; + int ret; + + netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x", + pdata->rfe_ctl); + + smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); + + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + check_warn(ret, "Error writing RFE_CRL"); +} + +static void smsc75xx_set_multicast(struct net_device *netdev) +{ + struct usbnet *dev = netdev_priv(netdev); + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + unsigned long flags; + int i; + + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + 
pdata->rfe_ctl &= + ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); + pdata->rfe_ctl |= RFE_CTL_AB; + + for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) + pdata->multicast_hash_table[i] = 0; + + if (dev->net->flags & IFF_PROMISC) { + netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); + pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; + } else if (dev->net->flags & IFF_ALLMULTI) { + netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); + pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; + } else if (!netdev_mc_empty(dev->net)) { + struct netdev_hw_addr *ha; + + netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); + + pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; + + netdev_for_each_mc_addr(ha, netdev) { + u32 bitnum = smsc75xx_hash(ha->addr); + pdata->multicast_hash_table[bitnum / 32] |= + (1 << (bitnum % 32)); + } + } else { + netif_dbg(dev, drv, dev->net, "receive own packets only"); + pdata->rfe_ctl |= RFE_CTL_DPF; + } + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_multicast); +} + +static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, + u16 lcladv, u16 rmtadv) +{ + u32 flow = 0, fct_flow = 0; + int ret; + + if (duplex == DUPLEX_FULL) { + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_TX) { + flow = (FLOW_TX_FCEN | 0xFFFF); + /* set fct_flow thresholds to 20% and 80% */ + fct_flow = (8 << 8) | 32; + } + + if (cap & FLOW_CTRL_RX) + flow |= FLOW_RX_FCEN; + + netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + netif_dbg(dev, link, dev->net, "half duplex"); + } + + ret = smsc75xx_write_reg(dev, FLOW, flow); + check_warn_return(ret, "Error writing FLOW"); + + ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); + check_warn_return(ret, "Error writing FCT_FLOW"); + + return 0; +} + +static int smsc75xx_link_reset(struct usbnet *dev) +{ + struct mii_if_info *mii = &dev->mii; + struct ethtool_cmd ecmd; + u16 lcladv, rmtadv; + int ret; + + /* clear interrupt status */ + ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); + check_warn_return(ret, "Error reading PHY_INT_SRC"); + + ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); + check_warn_return(ret, "Error writing INT_STS"); + + mii_check_media(mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); + rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); + + netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x" + " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv); + + return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); +} + +static void smsc75xx_status(struct usbnet *dev, struct urb *urb) +{ + u32 intdata; + + if (urb->actual_length != 4) { + netdev_warn(dev->net, + "unexpected urb length %d", urb->actual_length); + return; + } + + memcpy(&intdata, urb->transfer_buffer, 4); + le32_to_cpus(&intdata); + + netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata); + + if (intdata & INT_ENP_PHY_INT) + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + else + netdev_warn(dev->net, + "unexpected interrupt, intdata=0x%08X", intdata); +} + +/* Enable or disable Rx checksum offload engine */ +static int smsc75xx_set_rx_csum_offload(struct usbnet *dev) +{ + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + unsigned long flags; + int ret; + + 
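/* Toggle the IP and TCP/UDP checksum-mode bits in the cached rfe_ctl
+	 * under the spinlock, then push the new value to the device outside
+	 * the lock (the USB control transfer can sleep). */
+	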
spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + if (pdata->use_rx_csum) + pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; + else + pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + check_warn_return(ret, "Error writing RFE_CTL"); + + return 0; +} + +static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) +{ + return MAX_EEPROM_SIZE; +} + +static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct usbnet *dev = netdev_priv(netdev); + + ee->magic = LAN75XX_EEPROM_MAGIC; + + return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); +} + +static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct usbnet *dev = netdev_priv(netdev); + + if (ee->magic != LAN75XX_EEPROM_MAGIC) { + netdev_warn(dev->net, + "EEPROM: magic value mismatch: 0x%x", ee->magic); + return -EINVAL; + } + + return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); +} + +static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev) +{ + struct usbnet *dev = netdev_priv(netdev); + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + + return pdata->use_rx_csum; +} + +static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val) +{ + struct usbnet *dev = netdev_priv(netdev); + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + + pdata->use_rx_csum = !!val; + + return smsc75xx_set_rx_csum_offload(dev); +} + +static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; + else + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return 0; +} + +static const struct ethtool_ops smsc75xx_ethtool_ops = { + .get_link = usbnet_get_link, + .nway_reset = usbnet_nway_reset, + .get_drvinfo = usbnet_get_drvinfo, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_settings = usbnet_get_settings, + .set_settings = usbnet_set_settings, + .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, + .get_eeprom = smsc75xx_ethtool_get_eeprom, + .set_eeprom = smsc75xx_ethtool_set_eeprom, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_rx_csum = smsc75xx_ethtool_get_rx_csum, + .set_rx_csum = smsc75xx_ethtool_set_rx_csum, + .get_tso = ethtool_op_get_tso, + .set_tso = smsc75xx_ethtool_set_tso, +}; + +static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct usbnet *dev = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); +} + +static void smsc75xx_init_mac_address(struct usbnet *dev) +{ + /* try reading mac address from EEPROM */ + if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + dev->net->dev_addr) == 0) { + if (is_valid_ether_addr(dev->net->dev_addr)) { + /* eeprom values are valid so use them */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from EEPROM"); + return; + } + } + + /* no eeprom, or eeprom values are invalid. 
generate random MAC */ + random_ether_addr(dev->net->dev_addr); + netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr"); +} + +static int smsc75xx_set_mac_address(struct usbnet *dev) +{ + u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | + dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; + u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; + + int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); + check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret); + + ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); + check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret); + + addr_hi |= ADDR_FILTX_FB_VALID; + ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); + check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret); + + ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); + check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret); + + return 0; +} + +static int smsc75xx_phy_initialize(struct usbnet *dev) +{ + int bmcr, timeout = 0; + + /* Initialize MII structure */ + dev->mii.dev = dev->net; + dev->mii.mdio_read = smsc75xx_mdio_read; + dev->mii.mdio_write = smsc75xx_mdio_write; + dev->mii.phy_id_mask = 0x1f; + dev->mii.reg_num_mask = 0x1f; + dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; + + /* reset phy and wait for reset to complete */ + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); + + do { + msleep(10); + bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); + check_warn_return(bmcr, "Error reading MII_BMCR"); + timeout++; + } while ((bmcr & MII_BMCR) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout on PHY Reset"); + return -EIO; + } + + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + + /* read to clear */ + smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); + check_warn_return(bmcr, "Error reading PHY_INT_SRC"); + + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, + PHY_INT_MASK_DEFAULT); + mii_nway_restart(&dev->mii); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); + return 0; +} + +static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) +{ + int ret = 0; + u32 buf; + bool rxenabled; + + ret = smsc75xx_read_reg(dev, MAC_RX, &buf); + check_warn_return(ret, "Failed to read MAC_RX: %d", ret); + + rxenabled = ((buf & MAC_RX_RXEN) != 0); + + if (rxenabled) { + buf &= ~MAC_RX_RXEN; + ret = smsc75xx_write_reg(dev, MAC_RX, buf); + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); + } + + /* add 4 to size for FCS */ + buf &= ~MAC_RX_MAX_SIZE; + buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); + + ret = smsc75xx_write_reg(dev, MAC_RX, buf); + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); + + if (rxenabled) { + buf |= MAC_RX_RXEN; + ret = smsc75xx_write_reg(dev, MAC_RX, buf); + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); + } + + return 0; +} + +static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct usbnet *dev = netdev_priv(netdev); + + int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); + check_warn_return(ret, "Failed to set mac rx frame length"); + + return usbnet_change_mtu(netdev, new_mtu); +} + +static int smsc75xx_reset(struct usbnet *dev) +{ + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + u32 buf; + int ret = 0, timeout; + + netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); 
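+
+	/* Reset sequence: lite reset (HW_CFG_LRST), PHY reset (PMT_CTL_PHY_RST),
+	 * MAC address setup, bulk-in burst and FIFO sizing, RFE and checksum
+	 * offload configuration, PHY init, then enable the TX and RX paths. */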
+ + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + + buf |= HW_CFG_LRST; + + ret = smsc75xx_write_reg(dev, HW_CFG, buf); + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); + + timeout = 0; + do { + msleep(10); + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + timeout++; + } while ((buf & HW_CFG_LRST) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout on completion of Lite Reset"); + return -EIO; + } + + netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY"); + + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); + + buf |= PMT_CTL_PHY_RST; + + ret = smsc75xx_write_reg(dev, PMT_CTL, buf); + check_warn_return(ret, "Failed to write PMT_CTL: %d", ret); + + timeout = 0; + do { + msleep(10); + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); + check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); + timeout++; + } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout waiting for PHY Reset"); + return -EIO; + } + + netif_dbg(dev, ifup, dev->net, "PHY reset complete"); + + smsc75xx_init_mac_address(dev); + + ret = smsc75xx_set_mac_address(dev); + check_warn_return(ret, "Failed to set mac address"); + + netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr); + + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + + netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf); + + buf |= HW_CFG_BIR; + + ret = smsc75xx_write_reg(dev, HW_CFG, buf); + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); + + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + + netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after " + "writing HW_CFG_BIR: 0x%08x", buf); + + if (!turbo_mode) { + buf = 0; + dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; + } else if (dev->udev->speed == USB_SPEED_HIGH) { + buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; + } else { + buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; + } + + netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld", + (ulong)dev->rx_urb_size); + + ret = smsc75xx_write_reg(dev, BURST_CAP, buf); + check_warn_return(ret, "Failed to write BURST_CAP: %d", ret); + + ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); + check_warn_return(ret, "Failed to read BURST_CAP: %d", ret); + + netif_dbg(dev, ifup, dev->net, + "Read Value from BURST_CAP after writing: 0x%08x", buf); + + ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); + check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret); + + ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); + check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret); + + netif_dbg(dev, ifup, dev->net, + "Read Value from BULK_IN_DLY after writing: 0x%08x", buf); + + if (turbo_mode) { + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + + netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); + + buf |= (HW_CFG_MEF | HW_CFG_BCE); + + ret = smsc75xx_write_reg(dev, HW_CFG, buf); + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); + + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); + + 
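/* With HW_CFG_MEF and HW_CFG_BCE set, the device packs multiple frames
+		 * into each bulk-in transfer, bounded by the BURST_CAP value
+		 * programmed above. */
+		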
netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); + } + + /* set FIFO sizes */ + buf = (MAX_RX_FIFO_SIZE - 512) / 512; + ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); + check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret); + + netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf); + + buf = (MAX_TX_FIFO_SIZE - 512) / 512; + ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); + check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret); + + netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf); + + ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); + check_warn_return(ret, "Failed to write INT_STS: %d", ret); + + ret = smsc75xx_read_reg(dev, ID_REV, &buf); + check_warn_return(ret, "Failed to read ID_REV: %d", ret); + + netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); + + /* Configure GPIO pins as LED outputs */ + ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); + check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); + + buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); + buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; + + ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); + check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); + + ret = smsc75xx_write_reg(dev, FLOW, 0); + check_warn_return(ret, "Failed to write FLOW: %d", ret); + + ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); + check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret); + + /* Don't need rfe_ctl_lock during initialisation */ + ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); + check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); + + pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; + + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + check_warn_return(ret, "Failed to write RFE_CTL: %d", ret); + + ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); + check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); + + netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); + + /* Enable or disable checksum offload engines */ + ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE); + ret = smsc75xx_set_rx_csum_offload(dev); + check_warn_return(ret, "Failed to set rx csum offload: %d", ret); + + smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE); + + smsc75xx_set_multicast(dev->net); + + ret = smsc75xx_phy_initialize(dev); + check_warn_return(ret, "Failed to initialize PHY: %d", ret); + + ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); + check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret); + + /* enable PHY interrupts */ + buf |= INT_ENP_PHY_INT; + + ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); + check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); + + ret = smsc75xx_read_reg(dev, MAC_TX, &buf); + check_warn_return(ret, "Failed to read MAC_TX: %d", ret); + + buf |= MAC_TX_TXEN; + + ret = smsc75xx_write_reg(dev, MAC_TX, buf); + check_warn_return(ret, "Failed to write MAC_TX: %d", ret); + + netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf); + + ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); + check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret); + + buf |= FCT_TX_CTL_EN; + + ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); + check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret); + + netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf); + + ret = smsc75xx_set_rx_max_frame_length(dev, 1514); + check_warn_return(ret, "Failed to set max rx frame length"); + + ret = smsc75xx_read_reg(dev, MAC_RX, &buf); + 
check_warn_return(ret, "Failed to read MAC_RX: %d", ret); + + buf |= MAC_RX_RXEN; + + ret = smsc75xx_write_reg(dev, MAC_RX, buf); + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); + + netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf); + + ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); + check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret); + + buf |= FCT_RX_CTL_EN; + + ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); + check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret); + + netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf); + + netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0"); + return 0; +} + +static const struct net_device_ops smsc75xx_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = smsc75xx_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = smsc75xx_ioctl, + .ndo_set_multicast_list = smsc75xx_set_multicast, +}; + +static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) +{ + struct smsc75xx_priv *pdata = NULL; + int ret; + + printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); + + ret = usbnet_get_endpoints(dev, intf); + check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret); + + dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), + GFP_KERNEL); + + pdata = (struct smsc75xx_priv *)(dev->data[0]); + if (!pdata) { + netdev_warn(dev->net, "Unable to allocate smsc75xx_priv"); + return -ENOMEM; + } + + pdata->dev = dev; + + spin_lock_init(&pdata->rfe_ctl_lock); + mutex_init(&pdata->dataport_mutex); + + INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); + + pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; + + /* We have to advertise SG otherwise TSO cannot be enabled */ + dev->net->features |= NETIF_F_SG; + + /* Init all registers */ + ret = smsc75xx_reset(dev); + + dev->net->netdev_ops = &smsc75xx_netdev_ops; + dev->net->ethtool_ops = &smsc75xx_ethtool_ops; + dev->net->flags |= IFF_MULTICAST; + dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; + return 0; +} + +static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + if (pdata) { + netif_dbg(dev, ifdown, dev->net, "free pdata"); + kfree(pdata); + pdata = NULL; + dev->data[0] = 0; + } +} + +static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a, + u32 rx_cmd_b) +{ + if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); + skb->ip_summed = CHECKSUM_COMPLETE; + } +} + +static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); + + while (skb->len > 0) { + u32 rx_cmd_a, rx_cmd_b, align_count, size; + struct sk_buff *ax_skb; + unsigned char *packet; + + memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); + le32_to_cpus(&rx_cmd_a); + skb_pull(skb, 4); + + memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); + le32_to_cpus(&rx_cmd_b); + skb_pull(skb, 4 + NET_IP_ALIGN); + + packet = skb->data; + + /* get the packet length */ + size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; + align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; + + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { + netif_dbg(dev, rx_err, dev->net, + "Error rx_cmd_a=0x%08x", rx_cmd_a); + 
dev->net->stats.rx_errors++; + dev->net->stats.rx_dropped++; + + if (rx_cmd_a & RX_CMD_A_FCS) + dev->net->stats.rx_crc_errors++; + else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) + dev->net->stats.rx_frame_errors++; + } else { + /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ + if (unlikely(size > (ETH_FRAME_LEN + 12))) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x", rx_cmd_a); + return 0; + } + + /* last frame in this batch */ + if (skb->len == size) { + if (pdata->use_rx_csum) + smsc75xx_rx_csum_offload(skb, rx_cmd_a, + rx_cmd_b); + else + skb->ip_summed = CHECKSUM_NONE; + + skb_trim(skb, skb->len - 4); /* remove fcs */ + skb->truesize = size + sizeof(struct sk_buff); + + return 1; + } + + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!ax_skb)) { + netdev_warn(dev->net, "Error allocating skb"); + return 0; + } + + ax_skb->len = size; + ax_skb->data = packet; + skb_set_tail_pointer(ax_skb, size); + + if (pdata->use_rx_csum) + smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a, + rx_cmd_b); + else + ax_skb->ip_summed = CHECKSUM_NONE; + + skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ + ax_skb->truesize = size + sizeof(struct sk_buff); + + usbnet_skb_return(dev, ax_skb); + } + + skb_pull(skb, size); + + /* padding bytes before the next frame starts */ + if (skb->len) + skb_pull(skb, align_count); + } + + if (unlikely(skb->len < 0)) { + netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); + return 0; + } + + return 1; +} + +static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, + struct sk_buff *skb, gfp_t flags) +{ + u32 tx_cmd_a, tx_cmd_b; + + skb_linearize(skb); + + if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { + struct sk_buff *skb2 = + skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; + + if (skb_is_gso(skb)) { + u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); + tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; + + tx_cmd_a |= TX_CMD_A_LSO; + } else { + tx_cmd_b = 0; + } + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_b); + memcpy(skb->data, &tx_cmd_b, 4); + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_a); + memcpy(skb->data, &tx_cmd_a, 4); + + return skb; +} + +static const struct driver_info smsc75xx_info = { + .description = "smsc75xx USB 2.0 Gigabit Ethernet", + .bind = smsc75xx_bind, + .unbind = smsc75xx_unbind, + .link_reset = smsc75xx_link_reset, + .reset = smsc75xx_reset, + .rx_fixup = smsc75xx_rx_fixup, + .tx_fixup = smsc75xx_tx_fixup, + .status = smsc75xx_status, + .flags = FLAG_ETHER | FLAG_SEND_ZLP, +}; + +static const struct usb_device_id products[] = { + { + /* SMSC7500 USB Gigabit Ethernet Device */ + USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), + .driver_info = (unsigned long) &smsc75xx_info, + }, + { + /* SMSC7500 USB Gigabit Ethernet Device */ + USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), + .driver_info = (unsigned long) &smsc75xx_info, + }, + { }, /* END */ +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver smsc75xx_driver = { + .name = SMSC_CHIPNAME, + .id_table = products, + .probe = usbnet_probe, + .suspend = usbnet_suspend, + .resume = usbnet_resume, + .disconnect = usbnet_disconnect, +}; + +static int __init smsc75xx_init(void) +{ + return usb_register(&smsc75xx_driver); +} +module_init(smsc75xx_init); + +static void __exit smsc75xx_exit(void) +{ + 
usb_deregister(&smsc75xx_driver); +} +module_exit(smsc75xx_exit); + +MODULE_AUTHOR("Nancy Lin"); +MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>"); +MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h new file mode 100644 index 00000000000..16e98c77834 --- /dev/null +++ b/drivers/net/usb/smsc75xx.h @@ -0,0 +1,421 @@ + /*************************************************************************** + * + * Copyright (C) 2007-2010 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + *****************************************************************************/ + +#ifndef _SMSC75XX_H +#define _SMSC75XX_H + +/* Tx command words */ +#define TX_CMD_A_LSO (0x08000000) +#define TX_CMD_A_IPE (0x04000000) +#define TX_CMD_A_TPE (0x02000000) +#define TX_CMD_A_IVTG (0x01000000) +#define TX_CMD_A_RVTG (0x00800000) +#define TX_CMD_A_FCS (0x00400000) +#define TX_CMD_A_LEN (0x000FFFFF) + +#define TX_CMD_B_MSS (0x3FFF0000) +#define TX_CMD_B_MSS_SHIFT (16) +#define TX_MSS_MIN ((u16)8) +#define TX_CMD_B_VTAG (0x0000FFFF) + +/* Rx command words */ +#define RX_CMD_A_ICE (0x80000000) +#define RX_CMD_A_TCE (0x40000000) +#define RX_CMD_A_IPV (0x20000000) +#define RX_CMD_A_PID (0x18000000) +#define RX_CMD_A_PID_NIP (0x00000000) +#define RX_CMD_A_PID_TCP (0x08000000) +#define RX_CMD_A_PID_UDP (0x10000000) +#define RX_CMD_A_PID_PP (0x18000000) +#define RX_CMD_A_PFF (0x04000000) +#define RX_CMD_A_BAM (0x02000000) +#define RX_CMD_A_MAM (0x01000000) +#define RX_CMD_A_FVTG (0x00800000) +#define RX_CMD_A_RED (0x00400000) +#define RX_CMD_A_RWT (0x00200000) +#define RX_CMD_A_RUNT (0x00100000) +#define RX_CMD_A_LONG (0x00080000) +#define RX_CMD_A_RXE (0x00040000) +#define RX_CMD_A_DRB (0x00020000) +#define RX_CMD_A_FCS (0x00010000) +#define RX_CMD_A_UAM (0x00008000) +#define RX_CMD_A_LCSM (0x00004000) +#define RX_CMD_A_LEN (0x00003FFF) + +#define RX_CMD_B_CSUM (0xFFFF0000) +#define RX_CMD_B_CSUM_SHIFT (16) +#define RX_CMD_B_VTAG (0x0000FFFF) + +/* SCSRs */ +#define ID_REV (0x0000) + +#define FPGA_REV (0x0004) + +#define BOND_CTL (0x0008) + +#define INT_STS (0x000C) +#define INT_STS_RDFO_INT (0x00400000) +#define INT_STS_TXE_INT (0x00200000) +#define INT_STS_MACRTO_INT (0x00100000) +#define INT_STS_TX_DIS_INT (0x00080000) +#define INT_STS_RX_DIS_INT (0x00040000) +#define INT_STS_PHY_INT_ (0x00020000) +#define INT_STS_MAC_ERR_INT (0x00008000) +#define INT_STS_TDFU (0x00004000) +#define INT_STS_TDFO (0x00002000) +#define INT_STS_GPIOS (0x00000FFF) +#define INT_STS_CLEAR_ALL (0xFFFFFFFF) + +#define HW_CFG (0x0010) +#define HW_CFG_SMDET_STS (0x00008000) +#define HW_CFG_SMDET_EN (0x00004000) +#define HW_CFG_EEM (0x00002000) +#define HW_CFG_RST_PROTECT (0x00001000) +#define HW_CFG_PORT_SWAP (0x00000800) +#define HW_CFG_PHY_BOOST (0x00000600) 
+#define HW_CFG_PHY_BOOST_NORMAL (0x00000000) +#define HW_CFG_PHY_BOOST_4 (0x00002000) +#define HW_CFG_PHY_BOOST_8 (0x00004000) +#define HW_CFG_PHY_BOOST_12 (0x00006000) +#define HW_CFG_LEDB (0x00000100) +#define HW_CFG_BIR (0x00000080) +#define HW_CFG_SBP (0x00000040) +#define HW_CFG_IME (0x00000020) +#define HW_CFG_MEF (0x00000010) +#define HW_CFG_ETC (0x00000008) +#define HW_CFG_BCE (0x00000004) +#define HW_CFG_LRST (0x00000002) +#define HW_CFG_SRST (0x00000001) + +#define PMT_CTL (0x0014) +#define PMT_CTL_PHY_PWRUP (0x00000400) +#define PMT_CTL_RES_CLR_WKP_EN (0x00000100) +#define PMT_CTL_DEV_RDY (0x00000080) +#define PMT_CTL_SUS_MODE (0x00000060) +#define PMT_CTL_SUS_MODE_0 (0x00000000) +#define PMT_CTL_SUS_MODE_1 (0x00000020) +#define PMT_CTL_SUS_MODE_2 (0x00000040) +#define PMT_CTL_SUS_MODE_3 (0x00000060) +#define PMT_CTL_PHY_RST (0x00000010) +#define PMT_CTL_WOL_EN (0x00000008) +#define PMT_CTL_ED_EN (0x00000004) +#define PMT_CTL_WUPS (0x00000003) +#define PMT_CTL_WUPS_NO (0x00000000) +#define PMT_CTL_WUPS_ED (0x00000001) +#define PMT_CTL_WUPS_WOL (0x00000002) +#define PMT_CTL_WUPS_MULTI (0x00000003) + +#define LED_GPIO_CFG (0x0018) +#define LED_GPIO_CFG_LED2_FUN_SEL (0x80000000) +#define LED_GPIO_CFG_LED10_FUN_SEL (0x40000000) +#define LED_GPIO_CFG_LEDGPIO_EN (0x0000F000) +#define LED_GPIO_CFG_LEDGPIO_EN_0 (0x00001000) +#define LED_GPIO_CFG_LEDGPIO_EN_1 (0x00002000) +#define LED_GPIO_CFG_LEDGPIO_EN_2 (0x00004000) +#define LED_GPIO_CFG_LEDGPIO_EN_3 (0x00008000) +#define LED_GPIO_CFG_GPBUF (0x00000F00) +#define LED_GPIO_CFG_GPBUF_0 (0x00000100) +#define LED_GPIO_CFG_GPBUF_1 (0x00000200) +#define LED_GPIO_CFG_GPBUF_2 (0x00000400) +#define LED_GPIO_CFG_GPBUF_3 (0x00000800) +#define LED_GPIO_CFG_GPDIR (0x000000F0) +#define LED_GPIO_CFG_GPDIR_0 (0x00000010) +#define LED_GPIO_CFG_GPDIR_1 (0x00000020) +#define LED_GPIO_CFG_GPDIR_2 (0x00000040) +#define LED_GPIO_CFG_GPDIR_3 (0x00000080) +#define LED_GPIO_CFG_GPDATA (0x0000000F) +#define LED_GPIO_CFG_GPDATA_0 (0x00000001) +#define LED_GPIO_CFG_GPDATA_1 (0x00000002) +#define LED_GPIO_CFG_GPDATA_2 (0x00000004) +#define LED_GPIO_CFG_GPDATA_3 (0x00000008) + +#define GPIO_CFG (0x001C) +#define GPIO_CFG_SHIFT (24) +#define GPIO_CFG_GPEN (0xFF000000) +#define GPIO_CFG_GPBUF (0x00FF0000) +#define GPIO_CFG_GPDIR (0x0000FF00) +#define GPIO_CFG_GPDATA (0x000000FF) + +#define GPIO_WAKE (0x0020) +#define GPIO_WAKE_PHY_LINKUP_EN (0x80000000) +#define GPIO_WAKE_POL (0x0FFF0000) +#define GPIO_WAKE_POL_SHIFT (16) +#define GPIO_WAKE_WK (0x00000FFF) + +#define DP_SEL (0x0024) +#define DP_SEL_DPRDY (0x80000000) +#define DP_SEL_RSEL (0x0000000F) +#define DP_SEL_URX (0x00000000) +#define DP_SEL_VHF (0x00000001) +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) +#define DP_SEL_LSO_HEAD (0x00000002) +#define DP_SEL_FCT_RX (0x00000003) +#define DP_SEL_FCT_TX (0x00000004) +#define DP_SEL_DESCRIPTOR (0x00000005) +#define DP_SEL_WOL (0x00000006) + +#define DP_CMD (0x0028) +#define DP_CMD_WRITE (0x01) +#define DP_CMD_READ (0x00) + +#define DP_ADDR (0x002C) + +#define DP_DATA (0x0030) + +#define BURST_CAP (0x0034) +#define BURST_CAP_MASK (0x0000000F) + +#define INT_EP_CTL (0x0038) +#define INT_EP_CTL_INTEP_ON (0x80000000) +#define INT_EP_CTL_RDFO_EN (0x00400000) +#define INT_EP_CTL_TXE_EN (0x00200000) +#define INT_EP_CTL_MACROTO_EN (0x00100000) +#define INT_EP_CTL_TX_DIS_EN (0x00080000) +#define INT_EP_CTL_RX_DIS_EN (0x00040000) +#define INT_EP_CTL_PHY_EN_ (0x00020000) +#define INT_EP_CTL_MAC_ERR_EN (0x00008000) +#define INT_EP_CTL_TDFU_EN 
(0x00004000) +#define INT_EP_CTL_TDFO_EN (0x00002000) +#define INT_EP_CTL_RX_FIFO_EN (0x00001000) +#define INT_EP_CTL_GPIOX_EN (0x00000FFF) + +#define BULK_IN_DLY (0x003C) +#define BULK_IN_DLY_MASK (0xFFFF) + +#define E2P_CMD (0x0040) +#define E2P_CMD_BUSY (0x80000000) +#define E2P_CMD_MASK (0x70000000) +#define E2P_CMD_READ (0x00000000) +#define E2P_CMD_EWDS (0x10000000) +#define E2P_CMD_EWEN (0x20000000) +#define E2P_CMD_WRITE (0x30000000) +#define E2P_CMD_WRAL (0x40000000) +#define E2P_CMD_ERASE (0x50000000) +#define E2P_CMD_ERAL (0x60000000) +#define E2P_CMD_RELOAD (0x70000000) +#define E2P_CMD_TIMEOUT (0x00000400) +#define E2P_CMD_LOADED (0x00000200) +#define E2P_CMD_ADDR (0x000001FF) + +#define MAX_EEPROM_SIZE (512) + +#define E2P_DATA (0x0044) +#define E2P_DATA_MASK_ (0x000000FF) + +#define RFE_CTL (0x0060) +#define RFE_CTL_TCPUDP_CKM (0x00001000) +#define RFE_CTL_IP_CKM (0x00000800) +#define RFE_CTL_AB (0x00000400) +#define RFE_CTL_AM (0x00000200) +#define RFE_CTL_AU (0x00000100) +#define RFE_CTL_VS (0x00000080) +#define RFE_CTL_UF (0x00000040) +#define RFE_CTL_VF (0x00000020) +#define RFE_CTL_SPF (0x00000010) +#define RFE_CTL_MHF (0x00000008) +#define RFE_CTL_DHF (0x00000004) +#define RFE_CTL_DPF (0x00000002) +#define RFE_CTL_RST_RF (0x00000001) + +#define VLAN_TYPE (0x0064) +#define VLAN_TYPE_MASK (0x0000FFFF) + +#define FCT_RX_CTL (0x0090) +#define FCT_RX_CTL_EN (0x80000000) +#define FCT_RX_CTL_RST (0x40000000) +#define FCT_RX_CTL_SBF (0x02000000) +#define FCT_RX_CTL_OVERFLOW (0x01000000) +#define FCT_RX_CTL_FRM_DROP (0x00800000) +#define FCT_RX_CTL_RX_NOT_EMPTY (0x00400000) +#define FCT_RX_CTL_RX_EMPTY (0x00200000) +#define FCT_RX_CTL_RX_DISABLED (0x00100000) +#define FCT_RX_CTL_RXUSED (0x0000FFFF) + +#define FCT_TX_CTL (0x0094) +#define FCT_TX_CTL_EN (0x80000000) +#define FCT_TX_CTL_RST (0x40000000) +#define FCT_TX_CTL_TX_NOT_EMPTY (0x00400000) +#define FCT_TX_CTL_TX_EMPTY (0x00200000) +#define FCT_TX_CTL_TX_DISABLED (0x00100000) +#define FCT_TX_CTL_TXUSED (0x0000FFFF) + +#define FCT_RX_FIFO_END (0x0098) +#define FCT_RX_FIFO_END_MASK (0x0000007F) + +#define FCT_TX_FIFO_END (0x009C) +#define FCT_TX_FIFO_END_MASK (0x0000003F) + +#define FCT_FLOW (0x00A0) +#define FCT_FLOW_THRESHOLD_OFF (0x00007F00) +#define FCT_FLOW_THRESHOLD_OFF_SHIFT (8) +#define FCT_FLOW_THRESHOLD_ON (0x0000007F) + +/* MAC CSRs */ +#define MAC_CR (0x100) +#define MAC_CR_ADP (0x00002000) +#define MAC_CR_ADD (0x00001000) +#define MAC_CR_ASD (0x00000800) +#define MAC_CR_INT_LOOP (0x00000400) +#define MAC_CR_BOLMT (0x000000C0) +#define MAC_CR_FDPX (0x00000008) +#define MAC_CR_CFG (0x00000006) +#define MAC_CR_CFG_10 (0x00000000) +#define MAC_CR_CFG_100 (0x00000002) +#define MAC_CR_CFG_1000 (0x00000004) +#define MAC_CR_RST (0x00000001) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE (0x3FFF0000) +#define MAC_RX_MAX_SIZE_SHIFT (16) +#define MAC_RX_FCS_STRIP (0x00000010) +#define MAC_RX_FSE (0x00000004) +#define MAC_RX_RXD (0x00000002) +#define MAC_RX_RXEN (0x00000001) + +#define MAC_TX (0x108) +#define MAC_TX_BFCS (0x00000004) +#define MAC_TX_TXD (0x00000002) +#define MAC_TX_TXEN (0x00000001) + +#define FLOW (0x10C) +#define FLOW_FORCE_FC (0x80000000) +#define FLOW_TX_FCEN (0x40000000) +#define FLOW_RX_FCEN (0x20000000) +#define FLOW_FPF (0x10000000) +#define FLOW_PAUSE_TIME (0x0000FFFF) + +#define RAND_SEED (0x110) +#define RAND_SEED_MASK (0x0000FFFF) + +#define ERR_STS (0x114) +#define ERR_STS_FCS_ERR (0x00000100) +#define ERR_STS_LFRM_ERR (0x00000080) +#define ERR_STS_RUNT_ERR (0x00000040) +#define 
ERR_STS_COLLISION_ERR (0x00000010) +#define ERR_STS_ALIGN_ERR (0x00000008) +#define ERR_STS_URUN_ERR (0x00000004) + +#define RX_ADDRH (0x118) +#define RX_ADDRH_MASK (0x0000FFFF) + +#define RX_ADDRL (0x11C) + +#define MII_ACCESS (0x120) +#define MII_ACCESS_PHY_ADDR (0x0000F800) +#define MII_ACCESS_PHY_ADDR_SHIFT (11) +#define MII_ACCESS_REG_ADDR (0x000007C0) +#define MII_ACCESS_REG_ADDR_SHIFT (6) +#define MII_ACCESS_READ (0x00000000) +#define MII_ACCESS_WRITE (0x00000002) +#define MII_ACCESS_BUSY (0x00000001) + +#define MII_DATA (0x124) +#define MII_DATA_MASK (0x0000FFFF) + +#define WUCSR (0x140) +#define WUCSR_PFDA_FR (0x00000080) +#define WUCSR_WUFR (0x00000040) +#define WUCSR_MPR (0x00000020) +#define WUCSR_BCAST_FR (0x00000010) +#define WUCSR_PFDA_EN (0x00000008) +#define WUCSR_WUEN (0x00000004) +#define WUCSR_MPEN (0x00000002) +#define WUCSR_BCST_EN (0x00000001) + +#define WUF_CFGX (0x144) +#define WUF_CFGX_EN (0x80000000) +#define WUF_CFGX_ATYPE (0x03000000) +#define WUF_CFGX_ATYPE_UNICAST (0x00000000) +#define WUF_CFGX_ATYPE_MULTICAST (0x02000000) +#define WUF_CFGX_ATYPE_ALL (0x03000000) +#define WUF_CFGX_PATTERN_OFFSET (0x007F0000) +#define WUF_CFGX_PATTERN_OFFSET_SHIFT (16) +#define WUF_CFGX_CRC16 (0x0000FFFF) +#define WUF_NUM (8) + +#define WUF_MASKX (0x170) +#define WUF_MASKX_AVALID (0x80000000) +#define WUF_MASKX_ATYPE (0x40000000) + +#define ADDR_FILTX (0x300) +#define ADDR_FILTX_FB_VALID (0x80000000) +#define ADDR_FILTX_FB_TYPE (0x40000000) +#define ADDR_FILTX_FB_ADDRHI (0x0000FFFF) +#define ADDR_FILTX_SB_ADDRLO (0xFFFFFFFF) + +#define WUCSR2 (0x500) +#define WUCSR2_NS_RCD (0x00000040) +#define WUCSR2_ARP_RCD (0x00000020) +#define WUCSR2_TCPSYN_RCD (0x00000010) +#define WUCSR2_NS_OFFLOAD (0x00000004) +#define WUCSR2_ARP_OFFLOAD (0x00000002) +#define WUCSR2_TCPSYN_OFFLOAD (0x00000001) + +#define WOL_FIFO_STS (0x504) + +#define IPV6_ADDRX (0x510) + +#define IPV4_ADDRX (0x590) + + +/* Vendor-specific PHY Definitions */ + +/* Mode Control/Status Register */ +#define PHY_MODE_CTRL_STS (17) +#define MODE_CTRL_STS_EDPWRDOWN ((u16)0x2000) +#define MODE_CTRL_STS_ENERGYON ((u16)0x0002) + +#define PHY_INT_SRC (29) +#define PHY_INT_SRC_ENERGY_ON ((u16)0x0080) +#define PHY_INT_SRC_ANEG_COMP ((u16)0x0040) +#define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020) +#define PHY_INT_SRC_LINK_DOWN ((u16)0x0010) + +#define PHY_INT_MASK (30) +#define PHY_INT_MASK_ENERGY_ON ((u16)0x0080) +#define PHY_INT_MASK_ANEG_COMP ((u16)0x0040) +#define PHY_INT_MASK_REMOTE_FAULT ((u16)0x0020) +#define PHY_INT_MASK_LINK_DOWN ((u16)0x0010) +#define PHY_INT_MASK_DEFAULT (PHY_INT_MASK_ANEG_COMP | \ + PHY_INT_MASK_LINK_DOWN) + +#define PHY_SPECIAL (31) +#define PHY_SPECIAL_SPD ((u16)0x001C) +#define PHY_SPECIAL_SPD_10HALF ((u16)0x0004) +#define PHY_SPECIAL_SPD_10FULL ((u16)0x0014) +#define PHY_SPECIAL_SPD_100HALF ((u16)0x0008) +#define PHY_SPECIAL_SPD_100FULL ((u16)0x0018) + +/* USB Vendor Requests */ +#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0 +#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1 +#define USB_VENDOR_REQUEST_GET_STATS 0xA2 + +/* Interrupt Endpoint status word bitfields */ +#define INT_ENP_RDFO_INT ((u32)BIT(22)) +#define INT_ENP_TXE_INT ((u32)BIT(21)) +#define INT_ENP_TX_DIS_INT ((u32)BIT(19)) +#define INT_ENP_RX_DIS_INT ((u32)BIT(18)) +#define INT_ENP_PHY_INT ((u32)BIT(17)) +#define INT_ENP_MAC_ERR_INT ((u32)BIT(15)) +#define INT_ENP_RX_FIFO_DATA_INT ((u32)BIT(12)) + +#endif /* _SMSC75XX_H */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index df9179a1c93..12a3c88c528 100644 --- 
a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -28,6 +28,7 @@ #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> +#include <linux/slab.h> #include "smsc95xx.h" #define SMSC_CHIPNAME "smsc95xx" @@ -384,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev) pdata->mac_cr |= MAC_CR_MCPAS_; pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_); } else if (!netdev_mc_empty(dev->net)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; pdata->mac_cr |= MAC_CR_HPFILT_; pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - netdev_for_each_mc_addr(mc_list, netdev) { - u32 bitnum = smsc95xx_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + u32 bitnum = smsc95xx_hash(ha->addr); u32 mask = 0x01 << (bitnum & 0x1F); if (bitnum & 0x20) hash_hi |= mask; @@ -709,6 +710,8 @@ static void smsc95xx_start_rx_path(struct usbnet *dev) static int smsc95xx_phy_initialize(struct usbnet *dev) { + int bmcr, timeout = 0; + /* Initialize MII structure */ dev->mii.dev = dev->net; dev->mii.mdio_read = smsc95xx_mdio_read; @@ -717,7 +720,20 @@ static int smsc95xx_phy_initialize(struct usbnet *dev) dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID; + /* reset phy and wait for reset to complete */ smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); + + do { + msleep(10); + bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); + timeout++; + } while ((bmcr & MII_BMCR) && (timeout < 100)); + + if (timeout >= 100) { + netdev_warn(dev->net, "timeout on PHY Reset"); + return -EIO; + } + smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); @@ -1174,9 +1190,21 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, } if (csum) { - u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); - skb_push(skb, 4); - memcpy(skb->data, &csum_preamble, 4); + if (skb->len <= 45) { + /* workaround - hardware tx checksum does not work + * properly with extremely small packets */ + long csstart = skb->csum_start - skb_headroom(skb); + __wsum calc = csum_partial(skb->data + csstart, + skb->len - csstart, 0); + *((__sum16 *)(skb->data + csstart + + skb->csum_offset)) = csum_fold(calc); + + csum = false; + } else { + u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); + skb_push(skb, 4); + memcpy(skb->data, &csum_preamble, 4); + } } skb_push(skb, 4); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 17b6a62d206..a95c73de582 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -43,6 +43,7 @@ #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> +#include <linux/slab.h> #define DRIVER_VERSION "22-Aug-2005" @@ -1068,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, * NOTE: strictly conforming cdc-ether devices should expect * the ZLP here, but ignore the one-byte packet. 
*/ - if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { - urb->transfer_buffer_length++; - if (skb_tailroom(skb)) { - skb->data[skb->len] = 0; - __skb_put(skb, 1); - } + if (length % dev->maxpacket == 0) { + if (!(info->flags & FLAG_SEND_ZLP)) { + urb->transfer_buffer_length++; + if (skb_tailroom(skb)) { + skb->data[skb->len] = 0; + __skb_put(skb, 1); + } + } else + urb->transfer_flags |= URB_ZERO_PACKET; } spin_lock_irqsave(&dev->txq.lock, flags); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index b583d4968ad..f9f0730b53d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -9,6 +9,7 @@ */ #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 50f881aa393..467bcff13cd 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -89,7 +89,6 @@ static const int multicast_filter_limit = 32; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -1704,11 +1703,11 @@ static void rhine_set_rx_mode(struct net_device *dev) iowrite32(0xffffffff, ioaddr + MulticastFilter1); rx_mode = 0x0C; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3a486f3bad3..616f8c92b74 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs) u32 status = 0; u16 ANAR; - if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) + if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs)) status |= VELOCITY_LINK_FAIL; - if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) + if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs)) status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; - else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) + else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs)) status |= (VELOCITY_SPEED_1000); else { - velocity_mii_read(regs, MII_REG_ANAR, &ANAR); - if (ANAR & ANAR_TXFD) + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if (ANAR & ADVERTISE_100FULL) status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); - else if (ANAR & ANAR_TX) + else if (ANAR & ADVERTISE_100HALF) status |= VELOCITY_SPEED_100; - else if (ANAR & ANAR_10FD) + else if (ANAR & ADVERTISE_10FULL) status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); else status |= (VELOCITY_SPEED_10); } - if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { - velocity_mii_read(regs, MII_REG_ANAR, &ANAR); - if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) - == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { - if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, 
MII_CTRL1000, regs)) status |= VELOCITY_AUTONEG_ENABLE; } } @@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr) /*Enable or Disable PAUSE in ANAR */ switch (vptr->options.flow_cntl) { case FLOW_CNTL_TX: - MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); break; case FLOW_CNTL_RX: - MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); break; case FLOW_CNTL_TX_RX: - MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); break; case FLOW_CNTL_DISABLE: - MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); break; default: break; @@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr) */ static void mii_set_auto_on(struct velocity_info *vptr) { - if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) - MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); else - MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); } static u32 check_connection_type(struct mac_regs __iomem *regs) @@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs) else status |= VELOCITY_SPEED_100; - if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { - velocity_mii_read(regs, MII_REG_ANAR, &ANAR); - if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) - == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { - if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) status |= VELOCITY_AUTONEG_ENABLE; } } @@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) */ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) - MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); /* * If connection type is AUTO @@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) /* clear force MAC mode bit */ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); /* set duplex mode of MAC according to duplex mode of MII */ - MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, 
vptr->mac_regs); - MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); /* enable AUTO-NEGO mode */ mii_set_auto_on(vptr); @@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); } - MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); else BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); - /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ - velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); - ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); + /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */ + velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); + ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)); if (mii_status & VELOCITY_SPEED_100) { if (mii_status & VELOCITY_DUPLEX_FULL) - ANAR |= ANAR_TXFD; + ANAR |= ADVERTISE_100FULL; else - ANAR |= ANAR_TX; + ANAR |= ADVERTISE_100HALF; } else { if (mii_status & VELOCITY_DUPLEX_FULL) - ANAR |= ANAR_10FD; + ANAR |= ADVERTISE_10FULL; else - ANAR |= ANAR_10; + ANAR |= ADVERTISE_10HALF; } - velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); + velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); /* enable AUTO-NEGO mode */ mii_set_auto_on(vptr); - /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ + /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */ } /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ @@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev) struct mac_regs __iomem *regs = vptr->mac_regs; u8 rx_mode; int i; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ writel(0xffffffff, ®s->MARCAM[0]); @@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev) mac_get_cam_mask(regs, vptr->mCAMmask); i = 0; - netdev_for_each_mc_addr(mclist, dev) { - mac_set_cam(regs, i + offset, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + mac_set_cam(regs, i + offset, ha->addr); vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); i++; } @@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status) /* * Reset to hardware default */ - MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); /* * Turn on ECHODIS bit in NWay-forced full mode and turn it * off it in NWay-forced half mode for NWay-forced v.s. * legacy-forced issue. 
*/ if (vptr->mii_status & VELOCITY_DUPLEX_FULL) - MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); else - MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); /* * Turn on Link/Activity LED enable bit for CIS8201 */ - MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); break; case PHYID_VT3216_32BIT: case PHYID_VT3216_64BIT: /* * Reset to hardware default */ - MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); /* * Turn on ECHODIS bit in NWay-forced full mode and turn it * off it in NWay-forced half mode for NWay-forced v.s. * legacy-forced issue */ if (vptr->mii_status & VELOCITY_DUPLEX_FULL) - MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); else - MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); break; case PHYID_MARVELL_1000: @@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status) /* * Reset to hardware default */ - MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); break; default: ; } - velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); - if (BMCR & BMCR_ISO) { - BMCR &= ~BMCR_ISO; - velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); + velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); + if (BMCR & BMCR_ISOLATE) { + BMCR &= ~BMCR_ISOLATE; + velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); } } @@ -2953,13 +2953,13 @@ static int velocity_set_wol(struct velocity_info *vptr) if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) - MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); - MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); } if (vptr->mii_status & VELOCITY_SPEED_1000) - MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index ef4a0f64ba1..c38191179fa 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -1240,86 +1240,16 @@ struct velocity_context { u32 pattern[8]; }; - -/* - * MII registers. 
- */ - - /* * Registers in the MII (offset unit is WORD) */ -#define MII_REG_BMCR 0x00 // physical address -#define MII_REG_BMSR 0x01 // -#define MII_REG_PHYID1 0x02 // OUI -#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID -#define MII_REG_ANAR 0x04 // -#define MII_REG_ANLPAR 0x05 // -#define MII_REG_G1000CR 0x09 // -#define MII_REG_G1000SR 0x0A // -#define MII_REG_MODCFG 0x10 // -#define MII_REG_TCSR 0x16 // -#define MII_REG_PLED 0x1B // -// NS, MYSON only -#define MII_REG_PCR 0x17 // -// ESI only -#define MII_REG_PCSR 0x17 // -#define MII_REG_AUXCR 0x1C // - // Marvell 88E1000/88E1000S #define MII_REG_PSCR 0x10 // PHY specific control register // -// Bits in the BMCR register -// -#define BMCR_RESET 0x8000 // -#define BMCR_LBK 0x4000 // -#define BMCR_SPEED100 0x2000 // -#define BMCR_AUTO 0x1000 // -#define BMCR_PD 0x0800 // -#define BMCR_ISO 0x0400 // -#define BMCR_REAUTO 0x0200 // -#define BMCR_FDX 0x0100 // -#define BMCR_SPEED1G 0x0040 // -// -// Bits in the BMSR register -// -#define BMSR_AUTOCM 0x0020 // -#define BMSR_LNK 0x0004 // - -// -// Bits in the ANAR register -// -#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support -#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support -#define ANAR_T4 0x0200 // -#define ANAR_TXFD 0x0100 // -#define ANAR_TX 0x0080 // -#define ANAR_10FD 0x0040 // -#define ANAR_10 0x0020 // -// -// Bits in the ANLPAR register -// -#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support -#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support -#define ANLPAR_T4 0x0200 // -#define ANLPAR_TXFD 0x0100 // -#define ANLPAR_TX 0x0080 // -#define ANLPAR_10FD 0x0040 // -#define ANLPAR_10 0x0020 // - -// -// Bits in the G1000CR register -// -#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable -#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable - -// -// Bits in the G1000SR register +// Bits in the Silicon revision register // -#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable -#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable #define TCSR_ECHODIS 0x2000 // #define AUXCR_MDPPS 0x0004 // @@ -1338,7 +1268,6 @@ struct velocity_context { #define PHYID_REV_ID_MASK 0x0000000FUL -#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK) #define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) #define MII_REG_BITS_ON(x,i,p) do {\ @@ -1362,8 +1291,8 @@ struct velocity_context { #define MII_GET_PHY_ID(p) ({\ u32 id;\ - velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\ - velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\ + velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\ + velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\ (id);}) /* diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 25dc77ccbf5..b0a85d03879 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -25,6 +25,7 @@ #include <linux/virtio_net.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> +#include <linux/slab.h> static int napi_weight = 128; module_param(napi_weight, int, 0444); @@ -39,8 +40,7 @@ module_param(gso, bool, 0444); #define VIRTNET_SEND_COMMAND_SG_MAX 2 -struct virtnet_info -{ +struct virtnet_info { struct virtio_device *vdev; struct virtqueue *rvq, *svq, *cvq; struct net_device *dev; @@ -61,6 +61,10 @@ struct virtnet_info /* Chain pages by the private ptr. 
*/ struct page *pages; + + /* fragments + linear part + virtio header */ + struct scatterlist rx_sg[MAX_SKB_FRAGS + 2]; + struct scatterlist tx_sg[MAX_SKB_FRAGS + 2]; }; struct skb_vnet_hdr { @@ -323,7 +327,6 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) { struct sk_buff *skb; struct skb_vnet_hdr *hdr; - struct scatterlist sg[2]; int err; skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); @@ -333,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); - sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); + sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr); - skb_to_sgvec(skb, sg + 1, 0, skb->len); + skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); - err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); + err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); if (err < 0) dev_kfree_skb(skb); @@ -346,12 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) { - struct scatterlist sg[MAX_SKB_FRAGS + 2]; struct page *first, *list = NULL; char *p; int i, err, offset; - /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ + /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(vi, gfp); if (!first) { @@ -359,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) give_pages(vi, list); return -ENOMEM; } - sg_set_buf(&sg[i], page_address(first), PAGE_SIZE); + sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; @@ -373,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) } p = page_address(first); - /* sg[0], sg[1] share the same page */ - /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/ - sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr)); + /* vi->rx_sg[0], vi->rx_sg[1] share the same page */ + /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */ + sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr)); - /* sg[1] for data packet, from offset */ + /* vi->rx_sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); - sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset); + sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; - err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, + err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, first); if (err < 0) give_pages(vi, first); @@ -394,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) { struct page *page; - struct scatterlist sg; int err; page = get_a_page(vi, gfp); if (!page) return -ENOMEM; - sg_init_one(&sg, page_address(page), PAGE_SIZE); + sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); - err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); + err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page); if (err < 0) give_pages(vi, page); @@ -512,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) { - struct scatterlist sg[2+MAX_SKB_FRAGS]; struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; - sg_init_table(sg, 2+MAX_SKB_FRAGS); - pr_debug("%s: 
xmit %p %pM\n", vi->dev->name, skb, dest); if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -551,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) /* Encode metadata header at front. */ if (vi->mergeable_rx_bufs) - sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr); + sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr); else - sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); + sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); - hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; - return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); + hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; + return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg, + 0, skb); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -719,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev) struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; - struct dev_addr_list *addr; struct netdev_hw_addr *ha; int uc_count; int mc_count; @@ -776,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) mac_data->entries = mc_count; i = 0; - netdev_for_each_mc_addr(addr, dev) - memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); @@ -939,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev) vdev->priv = vi; vi->pages = NULL; INIT_DELAYED_WORK(&vi->refill, refill_work); + sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); + sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); /* If we can receive ANY GSO packets, we must allocate large ones. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index cff3485d967..90e783a0924 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1174,7 +1174,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, netif_receive_skb(skb); } - adapter->netdev->last_rx = jiffies; ctx->skb = NULL; } @@ -1675,11 +1674,11 @@ vmxnet3_copy_mc(struct net_device *netdev) /* We may be called with BH disabled */ buf = kmalloc(sz, GFP_ATOMIC); if (buf) { - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; int i = 0; - netdev_for_each_mc_addr(mc, netdev) - memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr, + netdev_for_each_mc_addr(ha, netdev) + memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); } } diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 32a75fa935e..a5fc8166c01 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -15,6 +15,7 @@ #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> +#include <linux/slab.h> #include "vxge-traffic.h" #include "vxge-config.h" @@ -356,8 +357,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id) switch (host_type) { case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: - access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | - VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + if (func_id == 0) { + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + } break; case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | @@ -632,8 +635,10 @@ vxge_hw_device_initialize( __vxge_hw_device_pci_e_init(hldev); status = __vxge_hw_device_reg_addr_get(hldev); - if (status != VXGE_HW_OK) + if 
(status != VXGE_HW_OK) { + vfree(hldev); goto exit; + } __vxge_hw_device_id_get(hldev); __vxge_hw_device_host_info_get(hldev); @@ -1217,14 +1222,13 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, } /* - * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs + * __vxge_hw_ring_replenish - Initial replenish of RxDs * This function replenishes the RxDs from reserve array to work array */ enum vxge_hw_status -vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag) +vxge_hw_ring_replenish(struct __vxge_hw_ring *ring) { void *rxd; - int i = 0; struct __vxge_hw_channel *channel; enum vxge_hw_status status = VXGE_HW_OK; @@ -1245,11 +1249,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag) } vxge_hw_ring_rxd_post(ring, rxd); - if (min_flag) { - i++; - if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION) - break; - } } status = VXGE_HW_OK; exit: @@ -1354,7 +1353,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, * Currently we don't have a case when the 1) is done without the 2). */ if (ring->rxd_init) { - status = vxge_hw_ring_replenish(ring, 1); + status = vxge_hw_ring_replenish(ring); if (status != VXGE_HW_OK) { __vxge_hw_ring_delete(vp); goto exit; @@ -1416,7 +1415,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) goto exit; if (ring->rxd_init) { - status = vxge_hw_ring_replenish(ring, 1); + status = vxge_hw_ring_replenish(ring); if (status != VXGE_HW_OK) goto exit; } diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index e7877df092f..4ae2625d4d8 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h @@ -14,6 +14,7 @@ #ifndef VXGE_CONFIG_H #define VXGE_CONFIG_H #include <linux/list.h> +#include <linux/slab.h> #ifndef VXGE_CACHE_LINE_SIZE #define VXGE_CACHE_LINE_SIZE 128 @@ -764,10 +765,18 @@ struct vxge_hw_device_hw_info { #define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 #define VXGE_HW_VH_NORMAL_FUNCTION 7 u64 function_mode; -#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0 -#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1 +#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1 #define VXGE_HW_FUNCTION_MODE_SRIOV 2 #define VXGE_HW_FUNCTION_MODE_MRIOV 3 +#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5 +#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6 +#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9 +#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10 + u32 func_id; u64 vpath_mask; struct vxge_hw_device_version fw_version; @@ -1914,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev, gfp_t flags; void *vaddr; unsigned long misaligned = 0; + int realloc_flag = 0; *p_dma_acch = *p_dmah = NULL; if (in_interrupt()) flags = GFP_ATOMIC | GFP_DMA; else flags = GFP_KERNEL | GFP_DMA; - - size += VXGE_CACHE_LINE_SIZE; - +realloc: vaddr = kmalloc((size), flags); if (vaddr == NULL) return vaddr; - misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr), + misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr, VXGE_CACHE_LINE_SIZE); + if (realloc_flag) + goto out; + + if (misaligned) { + /* misaligned, free current one and try allocating + * size + VXGE_CACHE_LINE_SIZE memory + */ + kfree((void *) vaddr); + size += VXGE_CACHE_LINE_SIZE; + realloc_flag = 1; + goto realloc; + } +out: *(unsigned long *)p_dma_acch = misaligned; vaddr = (void *)((u8 *)vaddr + misaligned); return vaddr; @@ -2253,4 +2274,6 
@@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set( struct vxge_hw_rth_hash_types *hash_type, u16 bucket_size); +enum vxge_hw_status +__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); #endif diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index c6736b97263..cadef8549c0 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c @@ -12,6 +12,7 @@ * Copyright(c) 2002-2009 Neterion Inc. ******************************************************************************/ #include<linux/ethtool.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/etherdevice.h> @@ -108,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev, int index, offset; enum vxge_hw_status status; u64 reg; - u8 *reg_space = (u8 *) space; + u64 *reg_space = (u64 *) space; struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); @@ -128,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev, __func__, __LINE__); return; } - - memcpy((reg_space + offset), ®, 8); + *reg_space++ = reg; } } } diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 46a7c9e689e..2bab36421f7 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -43,6 +43,7 @@ #include <linux/if_vlan.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/tcp.h> #include <net/ip.h> #include <linux/netdevice.h> @@ -444,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, ring->ndev->name, __func__, __LINE__); ring->pkts_processed = 0; - vxge_hw_ring_replenish(ringh, 0); + vxge_hw_ring_replenish(ringh); do { prefetch((char *)dtr + L1_CACHE_BYTES); @@ -1117,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) */ static void vxge_set_multicast(struct net_device *dev) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct vxgedev *vdev; int i, mcast_cnt = 0; struct __vxge_hw_device *hldev; @@ -1217,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev) } /* Add new ones */ - netdev_for_each_mc_addr(mclist, dev) { - memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(mac_info.macaddr, ha->addr, ETH_ALEN); for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { mac_info.vpath_no = vpath_idx; @@ -1363,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) { struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; - int msix_id, alarm_msix_id; - int tim_msix_id[4] = {[0 ...3] = 0}; + int msix_id = 0; + int tim_msix_id[4] = {0, 1, 0, 0}; + int alarm_msix_id = VXGE_ALARM_MSIX_ID; vxge_hw_vpath_intr_enable(vpath->handle); if (vdev->config.intr_type == INTA) vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); else { - msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; - alarm_msix_id = - VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; - - tim_msix_id[0] = msix_id; - tim_msix_id[1] = msix_id + 1; vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, alarm_msix_id); + msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); /* enable the alarm vector */ - vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id); + msix_id = (vpath->handle->vpath->hldev->first_vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id; + 
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); } } @@ -1405,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) if (vdev->config.intr_type == INTA) vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); else { - msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; + msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; vxge_hw_vpath_msix_mask(vpath->handle, msix_id); vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); /* disable the alarm vector */ - msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; + msix_id = (vpath->handle->vpath->hldev->first_vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; vxge_hw_vpath_msix_mask(vpath->handle, msix_id); } } @@ -2223,19 +2223,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id) enum vxge_hw_status status; struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; struct vxgedev *vdev = vpath->vdev; - int alarm_msix_id = - VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; + int msix_id = (vpath->handle->vpath->vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; for (i = 0; i < vdev->no_of_vpath; i++) { - vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, - alarm_msix_id); + vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, vdev->exec_mode); if (status == VXGE_HW_OK) { vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, - alarm_msix_id); + msix_id); continue; } vxge_debug_intr(VXGE_ERR, @@ -2248,18 +2247,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id) static int vxge_alloc_msix(struct vxgedev *vdev) { int j, i, ret = 0; - int intr_cnt = 0; - int alarm_msix_id = 0, msix_intr_vect = 0; + int msix_intr_vect = 0, temp; vdev->intr_cnt = 0; +start: /* Tx/Rx MSIX Vectors count */ vdev->intr_cnt = vdev->no_of_vpath * 2; /* Alarm MSIX Vectors count */ vdev->intr_cnt++; - intr_cnt = (vdev->max_vpath_supported * 2) + 1; - vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry), + vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry), GFP_KERNEL); if (!vdev->entries) { vxge_debug_init(VXGE_ERR, @@ -2268,8 +2266,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev) return -ENOMEM; } - vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry), - GFP_KERNEL); + vdev->vxge_entries = + kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry), + GFP_KERNEL); if (!vdev->vxge_entries) { vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", VXGE_DRIVER_NAME); @@ -2277,9 +2276,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev) return -ENOMEM; } - /* Last vector in the list is used for alarm */ - alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; - for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) { + for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; @@ -2297,47 +2294,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev) } /* Initialize the alarm vector */ - vdev->entries[j].entry = alarm_msix_id; - vdev->vxge_entries[j].entry = alarm_msix_id; + vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; + vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; vdev->vxge_entries[j].in_use = 0; - ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); - /* if driver request exceeeds available irq's, request with a small - * number. 
- */ - if (ret > 0) { - vxge_debug_init(VXGE_ERR, - "%s: MSI-X enable failed for %d vectors, available: %d", - VXGE_DRIVER_NAME, intr_cnt, ret); - vdev->max_vpath_supported = vdev->no_of_vpath; - intr_cnt = (vdev->max_vpath_supported * 2) + 1; - - /* Reset the alarm vector setting */ - vdev->entries[j].entry = 0; - vdev->vxge_entries[j].entry = 0; - - /* Initialize the alarm vector with new setting */ - vdev->entries[intr_cnt - 1].entry = alarm_msix_id; - vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id; - vdev->vxge_entries[intr_cnt - 1].in_use = 0; - - ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); - if (!ret) - vxge_debug_init(VXGE_ERR, - "%s: MSI-X enabled for %d vectors", - VXGE_DRIVER_NAME, intr_cnt); - } + ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); - if (ret) { + if (ret > 0) { vxge_debug_init(VXGE_ERR, "%s: MSI-X enable failed for %d vectors, ret: %d", - VXGE_DRIVER_NAME, intr_cnt, ret); + VXGE_DRIVER_NAME, vdev->intr_cnt, ret); kfree(vdev->entries); kfree(vdev->vxge_entries); vdev->entries = NULL; vdev->vxge_entries = NULL; + + if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) + return -ENODEV; + /* Try with less no of vector by reducing no of vpaths count */ + temp = (ret - 1)/2; + vxge_close_vpaths(vdev, temp); + vdev->no_of_vpath = temp; + goto start; + } else if (ret < 0) return -ENODEV; - } + return 0; } @@ -2345,43 +2326,26 @@ static int vxge_enable_msix(struct vxgedev *vdev) { int i, ret = 0; - enum vxge_hw_status status; /* 0 - Tx, 1 - Rx */ - int tim_msix_id[4]; - int alarm_msix_id = 0, msix_intr_vect = 0; + int tim_msix_id[4] = {0, 1, 0, 0}; + vdev->intr_cnt = 0; /* allocate msix vectors */ ret = vxge_alloc_msix(vdev); if (!ret) { - /* Last vector in the list is used for alarm */ - alarm_msix_id = - VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; for (i = 0; i < vdev->no_of_vpath; i++) { /* If fifo or ring are not enabled the MSIX vector for that should be set to 0 Hence initializeing this array to all 0s. 
*/ - memset(tim_msix_id, 0, sizeof(tim_msix_id)); - msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; - tim_msix_id[0] = msix_intr_vect; + vdev->vpaths[i].ring.rx_vector_no = + (vdev->vpaths[i].device_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + 1; - tim_msix_id[1] = msix_intr_vect + 1; - vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1]; - - status = vxge_hw_vpath_msix_set( - vdev->vpaths[i].handle, - tim_msix_id, alarm_msix_id); - if (status != VXGE_HW_OK) { - vxge_debug_init(VXGE_ERR, - "vxge_hw_vpath_msix_set " - "failed with status : %x", status); - kfree(vdev->entries); - kfree(vdev->vxge_entries); - pci_disable_msix(vdev->pdev); - return -ENODEV; - } + vxge_hw_vpath_msix_set(vdev->vpaths[i].handle, + tim_msix_id, VXGE_ALARM_MSIX_ID); } } @@ -2392,7 +2356,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) { int intr_cnt; - for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1); + for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); intr_cnt++) { if (vdev->vxge_entries[intr_cnt].in_use) { synchronize_irq(vdev->entries[intr_cnt].vector); @@ -2457,9 +2421,10 @@ static int vxge_add_isr(struct vxgedev *vdev) switch (msix_idx) { case 0: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, - "%s:vxge fn: %d vpath: %d Tx MSI-X: %d", - vdev->ndev->name, pci_fun, vp_idx, - vdev->entries[intr_cnt].entry); + "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + pci_fun, vp_idx); ret = request_irq( vdev->entries[intr_cnt].vector, vxge_tx_msix_handle, 0, @@ -2471,9 +2436,10 @@ static int vxge_add_isr(struct vxgedev *vdev) break; case 1: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, - "%s:vxge fn: %d vpath: %d Rx MSI-X: %d", - vdev->ndev->name, pci_fun, vp_idx, - vdev->entries[intr_cnt].entry); + "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + pci_fun, vp_idx); ret = request_irq( vdev->entries[intr_cnt].vector, vxge_rx_msix_napi_handle, @@ -2501,9 +2467,11 @@ static int vxge_add_isr(struct vxgedev *vdev) if (irq_req) { /* We requested for this msix interrupt */ vdev->vxge_entries[intr_cnt].in_use = 1; + msix_idx += vdev->vpaths[vp_idx].device_id * + VXGE_HW_VPATH_MSIX_ACTIVE; vxge_hw_vpath_msix_unmask( vdev->vpaths[vp_idx].handle, - intr_idx); + msix_idx); intr_cnt++; } @@ -2513,16 +2481,17 @@ static int vxge_add_isr(struct vxgedev *vdev) vp_idx++; } - intr_cnt = vdev->max_vpath_supported * 2; + intr_cnt = vdev->no_of_vpath * 2; snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, - "%s:vxge Alarm fn: %d MSI-X: %d", - vdev->ndev->name, pci_fun, - vdev->entries[intr_cnt].entry); + "%s:vxge:MSI-X %d - Alarm - fn:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + pci_fun); /* For Alarm interrupts */ ret = request_irq(vdev->entries[intr_cnt].vector, vxge_alarm_msix_handle, 0, vdev->desc[intr_cnt], - &vdev->vpaths[vp_idx]); + &vdev->vpaths[0]); if (ret) { vxge_debug_init(VXGE_ERR, "%s: MSIX - %d Registration failed", @@ -2535,16 +2504,19 @@ static int vxge_add_isr(struct vxgedev *vdev) goto INTA_MODE; } + msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, - intr_idx - 2); + msix_idx); vdev->vxge_entries[intr_cnt].in_use = 1; - vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx]; + vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; } INTA_MODE: #endif - snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name); if (vdev->config.intr_type == INTA) { + snprintf(vdev->desc[0], 
VXGE_INTR_STRLEN, + "%s:vxge:INTA", vdev->ndev->name); vxge_hw_device_set_intr_type(vdev->devh, VXGE_HW_INTR_MODE_IRQLINE); vxge_hw_vpath_tti_ci_set(vdev->devh, @@ -3994,6 +3966,36 @@ static void vxge_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } +static inline u32 vxge_get_num_vfs(u64 function_mode) +{ + u32 num_functions = 0; + + switch (function_mode) { + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: + case VXGE_HW_FUNCTION_MODE_SRIOV_8: + num_functions = 8; + break; + case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: + num_functions = 1; + break; + case VXGE_HW_FUNCTION_MODE_SRIOV: + case VXGE_HW_FUNCTION_MODE_MRIOV: + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17: + num_functions = 17; + break; + case VXGE_HW_FUNCTION_MODE_SRIOV_4: + num_functions = 4; + break; + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2: + num_functions = 2; + break; + case VXGE_HW_FUNCTION_MODE_MRIOV_8: + num_functions = 8; /* TODO */ + break; + } + return num_functions; +} + /** * vxge_probe * @pdev : structure containing the PCI related information of the device. @@ -4021,14 +4023,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) u8 *macaddr; struct vxge_mac_addrs *entry; static int bus = -1, device = -1; + u32 host_type; u8 new_device = 0; + enum vxge_hw_status is_privileged; + u32 function_mode; + u32 num_vfs = 0; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); attr.pdev = pdev; - if (bus != pdev->bus->number) - new_device = 1; - if (device != PCI_SLOT(pdev->devfn)) + /* In SRIOV-17 mode, functions of the same adapter + * can be deployed on different buses */ + if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || + (device != PCI_SLOT(pdev->devfn)))) new_device = 1; bus = pdev->bus->number; @@ -4045,9 +4052,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) driver_config->total_dev_cnt); driver_config->config_dev_cnt = 0; driver_config->total_dev_cnt = 0; - driver_config->g_no_cpus = 0; } - + /* Now making the CPU based no of vpath calculation + * applicable for individual functions as well. + */ + driver_config->g_no_cpus = 0; driver_config->vpath_per_dev = max_config_vpath; driver_config->total_dev_cnt++; @@ -4160,6 +4169,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) "%s:%d Vpath mask = %llx", __func__, __LINE__, (unsigned long long)vpath_mask); + function_mode = ll_config.device_hw_info.function_mode; + host_type = ll_config.device_hw_info.host_type; + is_privileged = __vxge_hw_device_is_privilaged(host_type, + ll_config.device_hw_info.func_id); + /* Check how many vpaths are available */ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!((vpath_mask) & vxge_mBIT(i))) @@ -4167,14 +4181,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) max_vpath_supported++; } + if (new_device) + num_vfs = vxge_get_num_vfs(function_mode) - 1; + /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ - if ((VXGE_HW_FUNCTION_MODE_SRIOV == - ll_config.device_hw_info.function_mode) && - (max_config_dev > 1) && (pdev->is_physfn)) { - ret = pci_enable_sriov(pdev, max_config_dev - 1); - if (ret) - vxge_debug_ll_config(VXGE_ERR, - "Failed to enable SRIOV: %d \n", ret); + if (is_sriov(function_mode) && (max_config_dev > 1) && + (ll_config.intr_type != INTA) && + (is_privileged == VXGE_HW_OK)) { + ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) + ? 
(max_config_dev - 1) : num_vfs); + if (ret) + vxge_debug_ll_config(VXGE_ERR, + "Failed in enabling SRIOV mode: %d\n", ret); } /* diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index 7c83ba4be9d..60276b20fa5 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h @@ -31,6 +31,7 @@ #define PCI_DEVICE_ID_TITAN_UNI 0x5833 #define VXGE_USE_DEFAULT 0xffffffff #define VXGE_HW_VPATH_MSIX_ACTIVE 4 +#define VXGE_ALARM_MSIX_ID 2 #define VXGE_HW_RXSYNC_FREQ_CNT 4 #define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ) #define VXGE_LL_RX_COPY_THRESHOLD 256 @@ -89,6 +90,11 @@ #define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) +#define is_sriov(function_mode) \ + ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \ + (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \ + (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4)) + enum vxge_reset_event { /* reset events */ VXGE_LL_VPATH_RESET = 0, diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 2c012f4ce46..f83e6aee3f6 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -231,8 +231,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id) { __vxge_hw_pio_mem_write32_upper( - (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), - 0, 32), + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &channel->common_reg->set_msix_mask_vect[msix_id%4]); return; @@ -252,8 +251,7 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) { __vxge_hw_pio_mem_write32_upper( - (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), - 0, 32), + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &channel->common_reg->clear_msix_mask_vect[msix_id%4]); return; @@ -878,7 +876,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh) channel = &ring->channel; - rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; + rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; if (ring->stats->common_stats.usage_cnt > 0) ring->stats->common_stats.usage_cnt--; @@ -902,7 +900,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) channel = &ring->channel; wmb(); - rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; + rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; vxge_hw_channel_dtr_post(channel, rxdh); @@ -966,6 +964,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( struct __vxge_hw_channel *channel; struct vxge_hw_ring_rxd_1 *rxdp; enum vxge_hw_status status = VXGE_HW_OK; + u64 control_0, own; channel = &ring->channel; @@ -977,8 +976,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( goto exit; } + control_0 = rxdp->control_0; + own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; + *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); + /* check whether it is not the end */ - if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) { + if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) { vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 0); @@ -986,8 +989,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed( ++ring->cmpl_cnt; vxge_hw_channel_dtr_complete(channel); - *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0); - vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED); ring->stats->common_stats.usage_cnt++; @@ -1035,12 +1036,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode( * such as unknown UPV6 header), Drop it !!! 
*/ - if (t_code == 0 || t_code == 5) { + if (t_code == VXGE_HW_RING_T_CODE_OK || + t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) { status = VXGE_HW_OK; goto exit; } - if (t_code > 0xF) { + if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) { status = VXGE_HW_ERR_INVALID_TCODE; goto exit; } @@ -2216,29 +2218,24 @@ exit: * This API will associate a given MSIX vector numbers with the four TIM * interrupts and alarm interrupt. */ -enum vxge_hw_status +void vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, int alarm_msix_id) { u64 val64; struct __vxge_hw_virtualpath *vpath = vp->vpath; struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; - u32 first_vp_id = vpath->hldev->first_vp_id; + u32 vp_id = vp->vpath->vp_id; val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( - (first_vp_id * 4) + tim_msix_id[0]) | + (vp_id * 4) + tim_msix_id[0]) | VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( - (first_vp_id * 4) + tim_msix_id[1]) | - VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI( - (first_vp_id * 4) + tim_msix_id[2]); - - val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI( - (first_vp_id * 4) + tim_msix_id[3]); + (vp_id * 4) + tim_msix_id[1]); writeq(val64, &vp_reg->interrupt_cfg0); writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( - (first_vp_id * 4) + alarm_msix_id), + (vpath->hldev->first_vp_id * 4) + alarm_msix_id), &vp_reg->interrupt_cfg2); if (vpath->hldev->config.intr_mode == @@ -2259,7 +2256,7 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, 0, 32), &vp_reg->one_shot_vect3_en); } - return VXGE_HW_OK; + return; } /** @@ -2279,8 +2276,7 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) { struct __vxge_hw_device *hldev = vp->vpath->hldev; __vxge_hw_pio_mem_write32_upper( - (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id + - (msix_id / 4)), 0, 32), + (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); return; @@ -2305,14 +2301,12 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { __vxge_hw_pio_mem_write32_upper( - (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + - (msix_id/4)), 0, 32), + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &hldev->common_reg-> clr_msix_one_shot_vec[msix_id%4]); } else { __vxge_hw_pio_mem_write32_upper( - (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + - (msix_id/4)), 0, 32), + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &hldev->common_reg-> clear_msix_mask_vect[msix_id%4]); } @@ -2337,8 +2331,7 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id) { struct __vxge_hw_device *hldev = vp->vpath->hldev; __vxge_hw_pio_mem_write32_upper( - (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + - (msix_id/4)), 0, 32), + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), &hldev->common_reg->clear_msix_mask_vect[msix_id%4]); return; diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index 861c853e3e8..c252f3d3f65 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h @@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info { u32 rth_hash_type; u32 rth_value; }; +/** + * enum vxge_hw_ring_tcode - Transfer codes returned by adapter + * @VXGE_HW_RING_T_CODE_OK: Transfer ok. + * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation + * configuration mismatch. + * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation + * configuration mismatch. 
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum + * presentation configuration mismatch. + * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet, + * such as unknown IPv6 header. + * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity + * error, such as FCS or ECC). + * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer( + * s) were not appropriately sized and data loss occurred. + * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted. + * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of + * Segment1 exceeded the capacity of Buffer1 and the remainder + * was placed in Buffer2. Segment2 now starts in Buffer3. + * No data loss or errors occurred. + * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs + * assigned buffers has a size of 0 bytes. + * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to + * VPath Reset or because of a VPIN mismatch. + * @VXGE_HW_RING_T_CODE_UNUSED: Unused + * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one + * transfer code condition occurred. + * + * Transfer codes returned by adapter. + */ +enum vxge_hw_ring_tcode { + VXGE_HW_RING_T_CODE_OK = 0x0, + VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1, + VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2, + VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3, + VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5, + VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6, + VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7, + VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8, + VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9, + VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA, + VXGE_HW_RING_T_CODE_FRM_DROP = 0xC, + VXGE_HW_RING_T_CODE_UNUSED = 0xE, + VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF +}; /** * enum enum vxge_hw_ring_hash_type - RTH hash types @@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post( void *rxdh); enum vxge_hw_status -vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag); +vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle); void vxge_hw_ring_rxd_post_post_wmb( @@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free( #define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8) #define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16) -#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64 /* * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data. 
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process( struct __vxge_hw_vpath_handle *vpath_handle, u32 skip_alarms); -enum vxge_hw_status +void vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle, int *tim_msix_id, int alarm_msix_id); diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h index 77c2a754b7b..5da7ab1fd30 100644 --- a/drivers/net/vxge/vxge-version.h +++ b/drivers/net/vxge/vxge-version.h @@ -17,7 +17,7 @@ #define VXGE_VERSION_MAJOR "2" #define VXGE_VERSION_MINOR "0" -#define VXGE_VERSION_FIX "6" -#define VXGE_VERSION_BUILD "18937" +#define VXGE_VERSION_FIX "8" +#define VXGE_VERSION_BUILD "20182" #define VXGE_VERSION_FOR "k" #endif diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index b36bf96eb50..f0bd70fb650 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -811,7 +811,7 @@ static ssize_t cosa_read(struct file *file, cosa_enable_rx(chan); spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->rxwaitq, &wait); - while(!chan->rx_status) { + while (!chan->rx_status) { current->state = TASK_INTERRUPTIBLE; spin_unlock_irqrestore(&cosa->lock, flags); schedule(); @@ -896,7 +896,7 @@ static ssize_t cosa_write(struct file *file, spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->txwaitq, &wait); - while(!chan->tx_status) { + while (!chan->tx_status) { current->state = TASK_INTERRUPTIBLE; spin_unlock_irqrestore(&cosa->lock, flags); schedule(); @@ -1153,7 +1153,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa, struct channel_data *channel, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; - switch(cmd) { + switch (cmd) { case COSAIORSET: /* Reset the device */ if (!capable(CAP_NET_ADMIN)) return -EACCES; @@ -1704,7 +1704,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status) spin_unlock_irqrestore(&cosa->lock, flags); return; } - while(1) { + while (1) { cosa->txchan++; i++; if (cosa->txchan >= cosa->nchannels) @@ -2010,7 +2010,7 @@ again: static void debug_status_in(struct cosa_data *cosa, int status) { char *s; - switch(status & SR_CMD_FROM_SRP_MASK) { + switch (status & SR_CMD_FROM_SRP_MASK) { case SR_UP_REQUEST: s = "RX_REQ"; break; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index f88c07c1319..a4859f7a7cc 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -89,6 +89,7 @@ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/slab.h> #include <asm/system.h> #include <asm/cache.h> diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 40d724a8e02..e087b9a6daa 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -20,6 +20,7 @@ #include <linux/version.h> #include <linux/pci.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/if.h> diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c index 80114c93bae..4dde2ea4a18 100644 --- a/drivers/net/wan/hd64570.c +++ b/drivers/net/wan/hd64570.c @@ -37,7 +37,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <asm/io.h> diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index 84f01373e11..aad9ed45c25 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -37,7 +37,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> 
-#include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <asm/io.h> diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index f1bff98acd1..ee7083fbea5 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -20,7 +20,6 @@ #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> -#include <linux/slab.h> #undef DEBUG_HARD_HEADER @@ -141,7 +140,7 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev) data->address != CISCO_UNICAST) return cpu_to_be16(ETH_P_HDLC); - switch(data->protocol) { + switch (data->protocol) { case cpu_to_be16(ETH_P_IP): case cpu_to_be16(ETH_P_IPX): case cpu_to_be16(ETH_P_IPV6): @@ -190,7 +189,7 @@ static int cisco_rx(struct sk_buff *skb) cisco_data = (struct cisco_packet*)(skb->data + sizeof (struct hdlc_header)); - switch(ntohl (cisco_data->type)) { + switch (ntohl (cisco_data->type)) { case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ in_dev = dev->ip_ptr; addr = 0; @@ -245,8 +244,8 @@ static int cisco_rx(struct sk_buff *skb) dev_kfree_skb_any(skb); return NET_RX_SUCCESS; - } /* switch(keepalive type) */ - } /* switch(protocol) */ + } /* switch (keepalive type) */ + } /* switch (protocol) */ printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name, ntohs(data->protocol)); diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 19f51fdd552..5dc153e8a29 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -20,7 +20,6 @@ #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> -#include <linux/slab.h> static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 1b30fcc2414..05c9b0b9623 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/etherdevice.h> +#include <linux/gfp.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> @@ -21,7 +22,6 @@ #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> -#include <linux/slab.h> static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index aa9248f8eb1..c7adbb79f7c 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -10,6 +10,7 @@ */ #include <linux/errno.h> +#include <linux/gfp.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> @@ -21,7 +22,6 @@ #include <linux/poll.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <net/x25device.h> static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -202,10 +202,10 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return 0; /* return protocol only, no settable parameters */ case IF_PROTO_X25: - if(!capable(CAP_NET_ADMIN)) + if (!capable(CAP_NET_ADMIN)) return -EPERM; - if(dev->flags & IFF_UP) + if (dev->flags & IFF_UP) return -EBUSY; result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 74164d29524..48edc5f4dac 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/hdlc.h> #include <linux/ioport.h> +#include <linux/slab.h> #include <net/arp.h> #include <asm/irq.h> diff --git 
a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index c705046d861..0c2cdde686a 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/poll.h> +#include <linux/slab.h> #include <mach/npe.h> #include <mach/qmgr.h> diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index d1e3c673e9b..98e2f99903d 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -24,6 +24,7 @@ #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/net.h> diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c index f327674fc93..5920c996fcd 100644 --- a/drivers/net/wan/lmc/lmc_media.c +++ b/drivers/net/wan/lmc/lmc_media.c @@ -6,7 +6,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/if_arp.h> diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 044a48175c4..f600075e84a 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -25,7 +25,6 @@ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/if_arp.h> diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index f4f1c00d0d2..17502d80b65 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -228,6 +228,7 @@ static char rcsid[] = #include <linux/etherdevice.h> #include <linux/spinlock.h> #include <linux/if.h> +#include <linux/slab.h> #include <net/arp.h> #include <asm/io.h> @@ -395,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch) u16 next_bd = card->chan[ch].tx_next_bd; u32 scabase = card->hw.scabase; - printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); + printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd); printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, first_bd, TX_BD_ADDR(ch, first_bd), next_bd, TX_BD_ADDR(ch, next_bd)); diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 4917a94943b..4293889e287 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c @@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip) int res; if (!tty || !tty->driver_data ) { - CPC_TTY_DBG("hdlx-tty: no TTY in close \n"); + CPC_TTY_DBG("hdlx-tty: no TTY in close\n"); return; } diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 25477b5cde4..cff13a9597c 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -43,7 +43,6 @@ #include <linux/fcntl.h> #include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 31c41af2246..43ae6f440bf 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map) return(-EINVAL); if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ - printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr); + printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr); return(-EINVAL); } base = map->base_addr; 
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 61249f489e3..e91457d6023 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -23,6 +23,7 @@ #include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> +#include <linux/slab.h> #include <net/arp.h> #include <asm/irq.h> diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index b9f520b7db6..80d5c5834a0 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -34,6 +34,7 @@ #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/compat.h> +#include <linux/slab.h> #include "x25_asy.h" #include <net/x25device.h> diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 0be7ec7299d..fbf5e843d48 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -47,6 +47,7 @@ #include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> +#include <linux/gfp.h> #include <asm/dma.h> #include <asm/io.h> #define RT_LOCK diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 94494554039..6180772dcc0 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -76,6 +76,7 @@ #include <stdarg.h> #include "i2400m.h" #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/wimax/i2400m.h> diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index 6cead321bc1..94dc83c3969 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -69,6 +69,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/suspend.h> +#include <linux/slab.h> #define D_SUBMODULE driver #include "debug-levels.h" diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index e803a7dc650..3f283bff0ff 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -156,6 +156,7 @@ */ #include <linux/firmware.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/usb.h> #include "i2400m.h" @@ -612,7 +613,7 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m, goto error_wait_for_ack; } rx_bytes = result; - /* verify the ack and read more if neccessary [result is the + /* verify the ack and read more if necessary [result is the * final amount of bytes we get in the ack] */ result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags); if (result < 0) diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 04df9bbe340..820b128705e 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h @@ -627,7 +627,7 @@ enum i2400m_bm_cmd_flags { * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed * directly to wait for a reboot barker from the device. * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot - * rom after reading the MAC adress. This is quite a dirty hack, + * rom after reading the MAC address. This is quite a dirty hack, * if you ask me -- the device requires the bootrom to be * intialized after reading the MAC address. */ diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 599aa4eb9ba..b811c2f1f5e 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -73,6 +73,7 @@ * alloc_netdev. 
*/ #include <linux/if_arp.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include "i2400m.h" diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 43927b5d7ad..035e4cf3e6e 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -34,6 +34,7 @@ */ #include "i2400m.h" #include <linux/wimax/i2400m.h> +#include <linux/slab.h> diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 7ddb173fd4a..fa2e11e5b4b 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -144,6 +144,7 @@ * i2400m_msg_size_check * wimax_msg */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/netdevice.h> diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c index 8adf6c9b6f8..d619da33f20 100644 --- a/drivers/net/wimax/i2400m/sdio-rx.c +++ b/drivers/net/wimax/i2400m/sdio-rx.c @@ -65,6 +65,7 @@ #include <linux/skbuff.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_func.h> +#include <linux/slab.h> #include "i2400m-sdio.h" #define D_SUBMODULE rx diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c index 76a50ac02eb..7632f80954e 100644 --- a/drivers/net/wimax/i2400m/sdio.c +++ b/drivers/net/wimax/i2400m/sdio.c @@ -48,6 +48,7 @@ * __i2400ms_send_barker() */ +#include <linux/slab.h> #include <linux/debugfs.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio.h> @@ -304,7 +305,7 @@ error_kzalloc: * * The device will be fully reset internally, but won't be * disconnected from the bus (so no reenumeration will - * happen). Firmware upload will be neccessary. + * happen). Firmware upload will be necessary. * * The device will send a reboot barker that will trigger the driver * to reinitialize the state via __i2400m_dev_reset_handle. @@ -314,7 +315,7 @@ error_kzalloc: * * The device will be fully reset internally, disconnected from the * bus an a reenumeration will happen. Firmware upload will be - * neccessary. Thus, we don't do any locking or struct + * necessary. Thus, we don't do any locking or struct * reinitialization, as we are going to be fully disconnected and * reenumerated. * diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c index 54480e8947f..6db909ecf1c 100644 --- a/drivers/net/wimax/i2400m/tx.c +++ b/drivers/net/wimax/i2400m/tx.c @@ -244,6 +244,7 @@ * (FIFO empty). 
*/ #include <linux/netdevice.h> +#include <linux/slab.h> #include "i2400m.h" @@ -688,7 +689,7 @@ try_new: pl_type, buf_len); tx_msg->num_pls = le16_to_cpu(num_pls+1); tx_msg->size += padded_len; - d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n", + d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n", padded_len, tx_msg->size, num_pls+1); d_printf(2, dev, "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n", diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index ce6b9938fde..b58ec56b86f 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -73,6 +73,7 @@ * i2400m_notif_submit */ #include <linux/usb.h> +#include <linux/gfp.h> #include "i2400m-usb.h" diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c index f88d1c6e35c..7b6a1d98bd7 100644 --- a/drivers/net/wimax/i2400m/usb-notif.c +++ b/drivers/net/wimax/i2400m/usb-notif.c @@ -56,6 +56,7 @@ * i2400mu_rx_kick() */ #include <linux/usb.h> +#include <linux/slab.h> #include "i2400m-usb.h" diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c index ba1b02362df..a26483a812a 100644 --- a/drivers/net/wimax/i2400m/usb-rx.c +++ b/drivers/net/wimax/i2400m/usb-rx.c @@ -83,6 +83,7 @@ * i2400mu_rx_release() called from i2400mu_bus_dev_stop() */ #include <linux/workqueue.h> +#include <linux/slab.h> #include <linux/usb.h> #include "i2400m-usb.h" diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 98f4f8c5fb6..d8c4d6497fd 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -66,6 +66,7 @@ #include "i2400m-usb.h" #include <linux/wimax/i2400m.h> #include <linux/debugfs.h> +#include <linux/slab.h> #define D_SUBMODULE usb @@ -246,7 +247,7 @@ error_kzalloc: * * The device will be fully reset internally, but won't be * disconnected from the USB bus (so no reenumeration will - * happen). Firmware upload will be neccessary. + * happen). Firmware upload will be necessary. * * The device will send a reboot barker in the notification endpoint * that will trigger the driver to reinitialize the state @@ -257,7 +258,7 @@ error_kzalloc: * * The device will be fully reset internally, disconnected from the * USB bus an a reenumeration will happen. Firmware upload will be - * neccessary. Thus, we don't do any locking or struct + * necessary. Thus, we don't do any locking or struct * reinitialization, as we are going to be fully disconnected and * reenumerated. 
* diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 547912e6843..880ad9d170c 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/if.h> #include <linux/skbuff.h> +#include <linux/slab.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/delay.h> @@ -1317,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev, } static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { - unsigned int bit_nr, i; + unsigned int bit_nr; u32 mc_filter[2]; + struct netdev_hw_addr *ha; mc_filter[1] = mc_filter[0] = 0; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_hw_addr_list_for_each(ha, mc_list) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; bit_nr &= 0x3F; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - mclist = mclist->next; } return mc_filter[0] | ((u64)(mc_filter[1]) << 32); diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index fed6695ec04..0312cee3957 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -38,6 +38,7 @@ */ #include <linux/init.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <net/mac80211.h> @@ -394,7 +395,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar) ieee80211_tx_status_irqsafe(ar->hw, skb); } - for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) { + for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) { #ifdef AR9170_QUEUE_STOP_DEBUG printk(KERN_DEBUG "%s: wake queue %d\n", wiphy_name(ar->hw->wiphy), i); @@ -2046,21 +2047,17 @@ out: return err; } -static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, - struct dev_addr_list *mclist) +static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { u64 mchash; - int i; + struct netdev_hw_addr *ha; /* always get broadcast frames */ mchash = 1ULL << (0xff >> 2); - for (i = 0; i < mc_count; i++) { - if (WARN_ON(!mclist)) - break; - mchash |= 1ULL << (mclist->dmi_addr[5] >> 2); - mclist = mclist->next; - } + netdev_hw_addr_list_for_each(ha, mc_list) + mchash |= 1ULL << (ha->addr[5] >> 2); return mchash; } @@ -2513,7 +2510,7 @@ void *ar9170_alloc(size_t priv_size) /* * this buffer is used for rx stream reconstruction. * Under heavy load this device (or the transport layer?) - * tends to split the streams into seperate rx descriptors. + * tends to split the streams into separate rx descriptors. 
*/ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 0259bb46247..c1c7c427501 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -38,6 +38,7 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/etherdevice.h> diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index dcf7c30f813..e0c244b02f0 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -21,6 +21,7 @@ \*************************************/ #include <linux/pci.h> +#include <linux/slab.h> #include "ath5k.h" #include "reg.h" #include "debug.h" diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 92327423325..93005f1d326 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -50,6 +50,7 @@ #include <linux/pci.h> #include <linux/ethtool.h> #include <linux/uaccess.h> +#include <linux/slab.h> #include <net/ieee80211_radiotap.h> @@ -230,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); static int ath5k_config(struct ieee80211_hw *hw, u32 changed); static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mc_list); + struct netdev_hw_addr_list *mc_list); static void ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, @@ -3072,22 +3073,20 @@ unlock: } static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { u32 mfilt[2], val; - int i; u8 pos; + struct netdev_hw_addr *ha; mfilt[0] = 0; mfilt[1] = 1; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; + netdev_hw_addr_list_for_each(ha, mc_list) { /* calculate XOR of eight 6-bit values */ - val = get_unaligned_le32(mclist->dmi_addr + 0); + val = get_unaligned_le32(ha->addr + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; - val = get_unaligned_le32(mclist->dmi_addr + 3); + val = get_unaligned_le32(ha->addr + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); @@ -3095,8 +3094,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, * but not sure, needs testing, if we do use this we'd * neet to inform below to not reset the mcast */ /* ath5k_hw_set_mcast_filterindex(ah, - * mclist->dmi_addr[5]); */ - mclist = mclist->next; + * ha->addr[5]); */ } return ((u64)(mfilt[1]) << 32) | mfilt[0]; diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index a3cbfe4fc38..ed0263672d6 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -21,6 +21,8 @@ * EEPROM access functions and helpers * \*************************************/ +#include <linux/slab.h> + #include "ath5k.h" #include "reg.h" #include "debug.h" diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 60873a4f617..3ce9afba1d8 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -21,6 +21,7 @@ */ #include <linux/delay.h> +#include <linux/slab.h> #include "ath5k.h" #include "reg.h" diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 
b22d46509b6..9a8e419398f 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -14,6 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/slab.h> #include <asm/unaligned.h> #include "ath9k.h" diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 3b9f4c1f8d4..af730c7d50e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -15,6 +15,7 @@ */ #include <linux/io.h> +#include <linux/slab.h> #include <asm/unaligned.h> #include "hw.h" diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 6063f546370..b78308c3c4d 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/slab.h> + #include "ath9k.h" static char *dev_info = "ath9k"; diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c index c3b59390fe3..2547b3c4a26 100644 --- a/drivers/net/wireless/ath/ath9k/phy.c +++ b/drivers/net/wireless/ath/ath9k/phy.c @@ -39,6 +39,8 @@ * AR9287 - 11n single-band 1x1 MIMO for USB */ +#include <linux/slab.h> + #include "hw.h" /** diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 3c4b5d2d9e1..ee81291f2fb 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -15,6 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/slab.h> + #include "ath9k.h" static const struct ath_rate_table ar5416_11na_ratetable = { diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c index e95aaa3f18f..105ad40968f 100644 --- a/drivers/net/wireless/ath/ath9k/virtual.c +++ b/drivers/net/wireless/ath/ath9k/virtual.c @@ -14,6 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#include <linux/slab.h> + #include "ath9k.h" struct ath9k_vif_iter_data { diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index d5c23328aef..24d59883d94 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -15,7 +15,6 @@ */ #include <linux/kernel.h> -#include <linux/slab.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include "regd.h" diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index be7abf8916a..fa40fdfea71 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -38,6 +38,7 @@ #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> +#include <linux/slab.h> #include <asm/div64.h> diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c index 976104f634a..94e4f1378fc 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/b43/lo.c @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/sched.h> +#include <linux/slab.h> static struct b43_lo_calib *b43_find_lo_calib(struct b43_txpower_lo_control *lo, diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 14cf3bd7ea5..997303bcf4a 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -42,6 +42,7 @@ #include <linux/skbuff.h> #include <linux/io.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <asm/unaligned.h> #include "b43.h" diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c index 984174bc7b0..609e7051e01 100644 --- a/drivers/net/wireless/b43/pcmcia.c +++ b/drivers/net/wireless/b43/pcmcia.c @@ -24,6 +24,7 @@ #include "pcmcia.h" #include <linux/ssb/ssb.h> +#include <linux/slab.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c index d90217c3a70..b6428ec16dd 100644 --- a/drivers/net/wireless/b43/phy_a.c +++ b/drivers/net/wireless/b43/phy_a.c @@ -26,6 +26,8 @@ */ +#include <linux/slab.h> + #include "b43.h" #include "phy_a.h" #include "phy_common.h" diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 382826a8da8..29bf34ced86 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -33,6 +33,7 @@ #include "main.h" #include <linux/bitrev.h> +#include <linux/slab.h> static const s8 b43_tssi2dbm_g_table[] = { diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index 185219e0a55..c6afe9d9459 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -23,6 +23,8 @@ */ +#include <linux/slab.h> + #include "b43.h" #include "main.h" #include "phy_lp.h" diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 9e93eb4a17c..3d6b3377596 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -23,6 +23,7 @@ */ #include <linux/delay.h> +#include <linux/slab.h> #include <linux/types.h> #include "b43.h" diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index a6062c3e89a..aa12273ae71 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/sched.h> +#include <linux/slab.h> static u16 generate_cookie(struct b43_pio_txqueue *q, diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 0d3ac64147a..4e56b7bbceb 100644 --- a/drivers/net/wireless/b43/sdio.c +++ 
b/drivers/net/wireless/b43/sdio.c @@ -16,6 +16,7 @@ #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> +#include <linux/slab.h> #include <linux/ssb/ssb.h> #include "sdio.h" diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c index 8b9387c6ff3..e91520d0312 100644 --- a/drivers/net/wireless/b43legacy/dma.c +++ b/drivers/net/wireless/b43legacy/dma.c @@ -37,6 +37,7 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/skbuff.h> +#include <linux/slab.h> #include <net/dst.h> /* 32bit DMA ops. */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 1d070be5a67..bb2dd9329aa 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -40,6 +40,7 @@ #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <net/dst.h> #include <asm/unaligned.h> diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c index aaf227203a9..35033dd342c 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/b43legacy/phy.c @@ -32,6 +32,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/types.h> #include "b43legacy.h" diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c index 017c0e9c37e..b033b0ed4ca 100644 --- a/drivers/net/wireless/b43legacy/pio.c +++ b/drivers/net/wireless/b43legacy/pio.c @@ -29,6 +29,7 @@ #include "xmit.h" #include <linux/delay.h> +#include <linux/slab.h> static void tx_start(struct b43legacy_pioqueue *queue) diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 3816df96a66..f4c56121d38 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -1,4 +1,5 @@ #include <linux/etherdevice.h> +#include <linux/slab.h> #include <net/lib80211.h> #include <linux/if_arp.h> diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 90108b698f1..c34a3b7f129 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -1,3 +1,5 @@ +#include <linux/slab.h> + #include "hostap_80211.h" #include "hostap_common.h" #include "hostap_wlan.h" diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index a2a203c90ba..7e72ac1de49 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -20,6 +20,7 @@ #include <linux/delay.h> #include <linux/random.h> #include <linux/if_arp.h> +#include <linux/slab.h> #include "hostap_wlan.h" #include "hostap.h" diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index d19748d90aa..a36501dbbe0 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -3,6 +3,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/if.h> +#include <linux/slab.h> #include <linux/wait.h> #include <linux/timer.h> #include <linux/skbuff.h> diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index 4dfb40a84c9..d737091cf6a 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c @@ -2,6 +2,7 @@ #include <linux/if_arp.h> #include <linux/sched.h> 
+#include <linux/slab.h> #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 9419cebca8a..9a082308a9d 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -1,5 +1,6 @@ /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ +#include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/ethtool.h> diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 4d97ae37499..d24dc7dc072 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -9,6 +9,7 @@ #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index fc04ccdc5be..33e79037770 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -12,6 +12,7 @@ #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 9e2ae8f3ecb..82de71a3aea 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -31,6 +31,7 @@ ******************************************************************************/ #include <linux/sched.h> +#include <linux/slab.h> #include "ipw2200.h" diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c index 65e8c175a4a..c9fe3c99cb0 100644 --- a/drivers/net/wireless/ipw2x00/libipw_geo.c +++ b/drivers/net/wireless/ipw2x00/libipw_geo.c @@ -34,7 +34,6 @@ #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c index 282b1f7ff1e..39a34da52d5 100644 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> +#include <linux/gfp.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> @@ -24,7 +25,6 @@ #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> -#include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c index 4d89f66f53b..3633c6682e4 100644 --- a/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -31,6 +31,7 @@ ******************************************************************************/ #include <linux/kmod.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/jiffies.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index 80e9bbc7884..32eb4709aca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include 
<linux/init.h> #include <linux/skbuff.h> +#include <linux/slab.h> #include <linux/wireless.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 15de649888d..bde3b4cbab9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 3c1cd417bbc..f7d85a2173c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/skbuff.h> +#include <linux/slab.h> #include <linux/wireless.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 8a002c6f3a6..0b497d4bc65 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -31,6 +31,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index dbb50a8e0ce..0471c3f8713 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c @@ -60,6 +60,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include <linux/slab.h> #include <net/mac80211.h> #include "iwl-dev.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index f09bff823ab..2a89747d347 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/sched.h> +#include <linux/slab.h> #include <net/mac80211.h> #include "iwl-eeprom.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 5f5820249a2..607a91f3eb6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -26,6 +26,7 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/debugfs.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index f469aa92316..4a487639d93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -28,6 +28,8 @@ /* sparse doesn't like tracepoint macros */ #ifndef __CHECKER__ +#include "iwl-dev.h" + #define CREATE_TRACE_POINTS #include "iwl-devtrace.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index ff4d012ce26..ae7319bb3a9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -28,7 +28,6 @@ #define __IWLWIFI_DEVICE_TRACE #include <linux/tracepoint.h> -#include "iwl-dev.h" #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) #undef TRACE_EVENT diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index fd37152abae..fb5bb487f3b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -63,6 +63,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index 4f54a5f71cd..0203a3bbf87 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -31,6 +31,7 @@ #include <linux/io.h> +#include "iwl-dev.h" #include "iwl-debug.h" #include "iwl-devtrace.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 2655dbdc817..581c683a850 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -29,6 +29,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 1dff14a67b2..267eb893590 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -28,6 +28,7 @@ *****************************************************************************/ #include <linux/etherdevice.h> +#include <linux/slab.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "iwl-eeprom.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index d817c9c184a..ae981932ce6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -25,6 +25,7 @@ * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ +#include <linux/slab.h> #include <linux/types.h> #include <linux/etherdevice.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index c3c6505a8c6..1ece2ea0977 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -29,6 +29,7 @@ #include <linux/etherdevice.h> #include <linux/sched.h> +#include <linux/slab.h> #include <net/mac80211.h> #include "iwl-eeprom.h" #include "iwl-dev.h" diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 24e969440b3..9f362024a29 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -31,6 +31,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c index fc239a32cb6..902e95f70f6 100644 --- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -27,6 +27,7 @@ #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/ieee80211.h> +#include <linux/slab.h> #include <net/cfg80211.h> #include "iwm.h" diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index b5cbd2bfd52..330c7d9cf10 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c @@ -41,6 +41,7 @@ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/sched.h> +#include <linux/slab.h> #include "iwm.h" #include "bus.h" diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c index 48930c1a0f7..724441368a1 100644 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c @@ -21,6 +21,7 @@ * */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/debugfs.h> @@ -89,7 +90,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val) for (i = 0; i < __IWM_DM_NR; i++) iwm->dbg.dbg_module[i] = 0; - for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR) + for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR) iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level; return 0; diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c index 8091421ee5e..e80e776b74f 100644 --- a/drivers/net/wireless/iwmc3200wifi/eeprom.c +++ b/drivers/net/wireless/iwmc3200wifi/eeprom.c @@ -37,6 +37,7 @@ */ #include <linux/kernel.h> +#include <linux/slab.h> #include "iwm.h" #include "umac.h" diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c index 373b5b5001d..9531b18cf72 100644 --- a/drivers/net/wireless/iwmc3200wifi/hal.c +++ b/drivers/net/wireless/iwmc3200wifi/hal.c @@ -98,6 +98,7 @@ */ #include <linux/kernel.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include "iwm.h" #include "bus.h" diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h index a3a79b5e289..a855a99e49b 100644 --- a/drivers/net/wireless/iwmc3200wifi/lmac.h +++ b/drivers/net/wireless/iwmc3200wifi/lmac.h @@ -262,7 +262,7 @@ struct iwm_ct_kill_cfg_cmd { /* 
Power Management */ #define POWER_TABLE_CMD 0x77 -#define SAVE_RESTORE_ADRESS_CMD 0x78 +#define SAVE_RESTORE_ADDRESS_CMD 0x78 #define REPLY_WATERMARK_CMD 0x79 #define PM_DEBUG_STATISTIC_NOTIFIC 0x7B #define PD_FLUSH_N_NOTIFICATION 0x7C diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c index 3a3510a6223..362002735b1 100644 --- a/drivers/net/wireless/iwmc3200wifi/main.c +++ b/drivers/net/wireless/iwmc3200wifi/main.c @@ -41,6 +41,7 @@ #include <linux/sched.h> #include <linux/ieee80211.h> #include <linux/wireless.h> +#include <linux/slab.h> #include "iwm.h" #include "debug.h" diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index c4c0d23c63e..13a69ebf2a9 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c @@ -46,6 +46,7 @@ * -> sdio_disable_func() */ #include <linux/netdevice.h> +#include <linux/slab.h> #include "iwm.h" #include "commands.h" diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 5090a8a6188..ad539877924 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -44,6 +44,7 @@ #include <linux/ieee80211.h> #include <linux/if_arp.h> #include <linux/list.h> +#include <linux/slab.h> #include <net/iw_handler.h> #include "iwm.h" @@ -1142,7 +1143,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf, return -EINVAL; } - for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) { + for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) { tid_info = &sta_info->tid_info[bit]; mutex_lock(&tid_info->mutex); diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c index a7ec7eac913..1eafd6dec3f 100644 --- a/drivers/net/wireless/iwmc3200wifi/sdio.c +++ b/drivers/net/wireless/iwmc3200wifi/sdio.c @@ -63,6 +63,7 @@ */ #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/debugfs.h> #include <linux/mmc/sdio_ids.h> diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c index a20b936ae21..9537cdb13d3 100644 --- a/drivers/net/wireless/iwmc3200wifi/tx.c +++ b/drivers/net/wireless/iwmc3200wifi/tx.c @@ -64,6 +64,7 @@ * (i.e. half of the max size). 
[iwm_tx_worker] */ +#include <linux/slab.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/ieee80211.h> diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c index 95d3d4c5e08..aa06070e5ea 100644 --- a/drivers/net/wireless/libertas/assoc.c +++ b/drivers/net/wireless/libertas/assoc.c @@ -4,6 +4,7 @@ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/if_arp.h> +#include <linux/slab.h> #include <net/lib80211.h> #include "assoc.h" diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 82ebe1461a7..ce7bec402a3 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -6,6 +6,7 @@ * */ +#include <linux/slab.h> #include <net/cfg80211.h> #include "cfg.h" diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 82371ef3952..cdb9b9650d7 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -5,6 +5,7 @@ #include <linux/kfifo.h> #include <linux/sched.h> +#include <linux/slab.h> #include "host.h" #include "decl.h" diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index e7470442f76..88f7131d66e 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -2,6 +2,7 @@ * This file contains the handling of command * responses as well as events generated by firmware. */ +#include <linux/slab.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/if_arp.h> diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 9c3c2f82f03..6f5b843c1f4 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -4,6 +4,7 @@ #include <linux/delay.h> #include <linux/mm.h> #include <linux/string.h> +#include <linux/slab.h> #include <net/iw_handler.h> #include <net/lib80211.h> diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index 1f6cb58dd66..6d55439a7b9 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -22,6 +22,7 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/moduleparam.h> #include <linux/firmware.h> diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 33206a98a57..cd464a2589b 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -28,6 +28,7 @@ #include <linux/kernel.h> #include <linux/moduleparam.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/netdevice.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 3ea03f259ee..fe3f08028eb 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -24,6 +24,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/semaphore.h> +#include <linux/slab.h> #include <linux/spi/libertas_spi.h> #include <linux/spi/spi.h> diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 65e174595d1..fcea5741ba6 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -5,6 +5,7 @@ #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/usb.h> 
#ifdef CONFIG_OLPC diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 3c889f43d90..38edad6f24b 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -13,6 +13,7 @@ #include <linux/kfifo.h> #include <linux/stddef.h> #include <linux/ieee80211.h> +#include <linux/slab.h> #include <net/iw_handler.h> #include <net/cfg80211.h> @@ -318,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, struct net_device *dev, int nr_addrs) { int i = nr_addrs; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; int cnt; if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) @@ -326,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, netif_addr_lock_bh(dev); cnt = netdev_mc_count(dev); - netdev_for_each_mc_addr(mc_list, dev) { - if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { + netdev_for_each_mc_addr(ha, dev) { + if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) { lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, - mc_list->dmi_addr); + ha->addr); cnt--; continue; } if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) break; - memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); + memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN); lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, - mc_list->dmi_addr); + ha->addr); i++; cnt--; } diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 7a867e31ca5..e2b8d886b09 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -2,6 +2,7 @@ * This file contains the handling of RX in wlan driver. */ #include <linux/etherdevice.h> +#include <linux/slab.h> #include <linux/types.h> #include "host.h" diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index 220361e69cd..24cd54b3a80 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c @@ -4,6 +4,7 @@ * IOCTL handlers as well as command preperation and response routines * for sending scan commands to the firmware. */ +#include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/etherdevice.h> diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index aad6263dee6..f96a96031a5 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -2,6 +2,7 @@ * This file contains ioctl functions */ #include <linux/ctype.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/if.h> #include <linux/if_arp.h> diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c index 28790e03dc4..b620daf59ef 100644 --- a/drivers/net/wireless/libertas_tf/cmd.c +++ b/drivers/net/wireless/libertas_tf/cmd.c @@ -7,6 +7,8 @@ * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. 
*/ +#include <linux/slab.h> + #include "libertas_tf.h" static const struct channel_range channel_ranges[] = { diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c index 3691c307e67..8cc9db60c14 100644 --- a/drivers/net/wireless/libertas_tf/if_usb.c +++ b/drivers/net/wireless/libertas_tf/if_usb.c @@ -11,6 +11,7 @@ #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/netdevice.h> +#include <linux/slab.h> #include <linux/usb.h> #define DRV_NAME "lbtf_usb" diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 6ab30033c26..7533a23e050 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -7,6 +7,8 @@ * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ +#include <linux/slab.h> + #include "libertas_tf.h" #include "linux/etherdevice.h" @@ -367,22 +369,20 @@ static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed) } static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct lbtf_private *priv = hw->priv; int i; + struct netdev_hw_addr *ha; + int mc_count = netdev_hw_addr_list_count(mc_list); if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) return mc_count; priv->nr_of_multicastmacaddr = mc_count; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - memcpy(&priv->multicastlist[i], mclist->da_addr, - ETH_ALEN); - mclist = mclist->next; - } + i = 0; + netdev_hw_addr_list_for_each(ha, mc_list) + memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN); return mc_count; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index ec8b0829179..dfff02f5c86 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -14,6 +14,7 @@ */ #include <linux/list.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <net/dst.h> #include <net/xfrm.h> diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 4e58ebe1558..73bbd080c6e 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/completion.h> #include <linux/etherdevice.h> +#include <linux/slab.h> #include <net/mac80211.h> #include <linux/moduleparam.h> #include <linux/firmware.h> @@ -1938,11 +1939,15 @@ struct mwl8k_cmd_mac_multicast_adr { static struct mwl8k_cmd_pkt * __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_mac_multicast_adr *cmd; int size; + int mc_count = 0; + + if (mc_list) + mc_count = netdev_hw_addr_list_count(mc_list); if (allmulti || mc_count > priv->num_mcaddrs) { allmulti = 1; @@ -1963,17 +1968,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, if (allmulti) { cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); } else if (mc_count) { - int i; + struct netdev_hw_addr *ha; + int i = 0; cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); cmd->numaddr = cpu_to_le16(mc_count); - for (i = 0; i < mc_count && mclist; i++) { - if (mclist->da_addrlen != ETH_ALEN) { - kfree(cmd); - return NULL; - } - memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN); - mclist = mclist->next; + netdev_hw_addr_list_for_each(ha, mc_list) { + memcpy(cmd->addr[i], 
ha->addr, ETH_ALEN); } } @@ -3552,7 +3553,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct mwl8k_cmd_pkt *cmd; @@ -3563,7 +3564,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, * we'll end up throwing this packet away and creating a new * one in mwl8k_configure_filter(). */ - cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist); + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list); return (unsigned long)cmd; } @@ -3686,7 +3687,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw, */ if (*total_flags & FIF_ALLMULTI) { kfree(cmd); - cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL); + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL); } if (cmd != NULL) { diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c index cfa72962052..5ea0f7cf85b 100644 --- a/drivers/net/wireless/orinoco/fw.c +++ b/drivers/net/wireless/orinoco/fw.c @@ -3,6 +3,7 @@ * See copyright notice in main.c */ #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/device.h> diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 883b8f86862..9f657afaa3e 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c @@ -1056,14 +1056,14 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, * group address if either we want to multicast, or if we were * multicasting and want to stop */ if (!promisc && (mc_count || priv->mc_count)) { - struct dev_mc_list *p; + struct netdev_hw_addr *ha; struct hermes_multicast mclist; int i = 0; - netdev_for_each_mc_addr(p, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i == mc_count) break; - memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN); + memcpy(mclist.addr[i++], ha->addr, ETH_ALEN); } err = hermes_write_ltv(hw, USER_BAP, diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h index 9799a1d14a6..97af71e7995 100644 --- a/drivers/net/wireless/orinoco/hw.h +++ b/drivers/net/wireless/orinoco/hw.h @@ -22,7 +22,6 @@ /* Forward declarations */ struct orinoco_private; -struct dev_addr_list; int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, size_t fw_name_len, u32 *hw_ver); diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index b42634c614b..413e9ab6cab 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -78,6 +78,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index d2f10e9c216..330d42d4533 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -3,6 +3,7 @@ * See copyright notice in main.c */ +#include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ieee80211.h> diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 29f9bc03190..57b850ebfeb 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c @@ -2,6 +2,7 @@ * * See copyright notice in main.c */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/wireless.h> diff --git 
a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index 8e3818f6832..187e263b045 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -20,6 +20,7 @@ #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/sort.h> +#include <linux/slab.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index e7b9e9cb39f..c43a5d461ab 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -17,6 +17,7 @@ */ #include <linux/init.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index 36f4c820ad0..7bbd9d3bba6 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -17,6 +17,7 @@ */ #include <linux/init.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index aa29663bf6c..86f3e9ac4c7 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index afd26bf0664..c8f09da1f84 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -29,6 +29,7 @@ #include <linux/spi/spi.h> #include <linux/etherdevice.h> #include <linux/gpio.h> +#include <linux/slab.h> #include "p54spi.h" #include "p54spi_eeprom.h" diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index e3cfc001d2f..743a6c68b29 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/usb.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index f7f5c793514..a45818ebfdf 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/if_arp.h> +#include <linux/slab.h> #include <linux/pci.h> #include <asm/uaccess.h> diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 7c82e432cca..10d91afefa3 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -19,6 +19,7 @@ */ #include <linux/module.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/ethtool.h> diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index af9e7fbd764..64585da8a96 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -17,6 +17,7 @@ */ #include <linux/module.h> +#include <linux/gfp.h> #include <linux/pci.h> #include <linux/delay.h> diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index 89b0278eb7e..a5224f6160e 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/pci.h> #include 
<linux/sched.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/system.h> diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h index 87a1734663d..0b27e50fe0d 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.h +++ b/drivers/net/wireless/prism54/islpci_mgt.h @@ -22,6 +22,7 @@ #include <linux/wireless.h> #include <linux/skbuff.h> +#include <linux/slab.h> /* * Function definitions diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c index 07df70a1007..9b796cae4af 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/prism54/oid_mgt.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> +#include <linux/slab.h> #include "prismcompat.h" #include "islpci_dev.h" diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index f1e916a3166..d9c45bfcee6 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -35,7 +35,6 @@ #include <linux/proc_fs.h> #include <linux/ptrace.h> #include <linux/seq_file.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/init.h> @@ -1875,17 +1874,17 @@ static void ray_update_multi_list(struct net_device *dev, int all) writeb(0xff, &pccs->var); local->num_multi = 0xff; } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i = 0; /* Copy the kernel's list of MC addresses to card */ - netdev_for_each_mc_addr(dmi, dev) { - memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy_toio(p, ha->addr, ETH_ALEN); dev_dbg(&link->dev, "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], - dmi->dmi_addr[2], dmi->dmi_addr[3], - dmi->dmi_addr[4], dmi->dmi_addr[5]); + ha->addr[0], ha->addr[1], + ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); p += ETH_ALEN; i++; } diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index aceb95ef727..babdcdf6d71 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -41,6 +41,7 @@ #include <linux/if_arp.h> #include <linux/ctype.h> #include <linux/spinlock.h> +#include <linux/slab.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <linux/usb/usbnet.h> @@ -1545,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) static void set_multicast_list(struct usbnet *usbdev) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; __le32 filter, basefilter; int ret; char *mc_addrs = NULL; @@ -1584,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev) return; } - netdev_for_each_mc_addr(mclist, usbdev->net) + netdev_for_each_mc_addr(ha, usbdev->net) memcpy(mc_addrs + i++ * ETH_ALEN, - mclist->dmi_addr, ETH_ALEN); + ha->addr, ETH_ALEN); } netif_addr_unlock_bh(usbdev->net); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 08a4789fc2d..cdbf59108ef 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -31,6 +31,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> +#include <linux/slab.h> #include "rt2x00.h" #include "rt2x00pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index d084d70e5fe..89e986f449d 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ 
-31,6 +31,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> +#include <linux/slab.h> #include "rt2x00.h" #include "rt2x00pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index c1eec17fe18..7185cb05f25 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -29,6 +29,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" @@ -368,7 +369,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, /* * The encryption key doesn't fit within the CSR cache, - * this means we should allocate it seperately and use + * this means we should allocate it separately and use * rt2x00usb_vendor_request() to send the key to the hardware. */ reg = KEY_ENTRY(key->hw_key_idx); @@ -382,7 +383,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, /* * The driver does not support the IV/EIV generation * in hardware. However it demands the data to be provided - * both seperately as well as inside the frame. + * both separately as well as inside the frame. * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib * to ensure rt2x00lib will not strip the data from the * frame after the copy, now we must tell mac80211 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 638600092f1..2648f315a93 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include "rt2x00.h" #if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 4c12f2ffe22..6b809ab42c6 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -99,7 +99,7 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, * There are 2 variations of the rt2870 firmware. * a) size: 4kb * b) size: 8kb - * Note that (b) contains 2 seperate firmware blobs of 4k + * Note that (b) contains 2 separate firmware blobs of 4k * within the file. The first blob is the same firmware as (a), * but the second blob is for the additional chipsets. */ @@ -117,7 +117,7 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, /* * 8kb firmware files must be checked as if it were - * 2 seperate firmware files. + * 2 separate firmware files. */ while (offset < len) { if (!rt2800usb_check_crc(data + offset, 4096)) diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index 70c04c282ef..9569fb4e5bc 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/poll.h> #include <linux/sched.h> +#include <linux/slab.h> #include <linux/uaccess.h> #include "rt2x00.h" @@ -109,7 +110,7 @@ struct rt2x00debug_intf { /* * HW crypto statistics. - * All statistics are stored seperately per cipher type. + * All statistics are stored separately per cipher type. 
*/ struct rt2x00debug_crypto crypto_stats[CIPHER_MAX]; diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index b93731b7990..eda73ba735a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -25,6 +25,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include "rt2x00.h" #include "rt2x00lib.h" @@ -394,7 +395,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, /* * Hardware might have stripped the IV/EIV/ICV data, * in that case it is possible that the data was - * provided seperately (through hardware descriptor) + * provided separately (through hardware descriptor) * in which case we should reinsert the data into the frame. */ if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) && diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 047123b766f..cf3f1c0c438 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/slab.h> #include "rt2x00.h" #include "rt2x00pci.h" diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 0b4801a1460..a0bd36fc4d2 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -24,6 +24,7 @@ Abstract: rt2x00 queue specific routines. */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> @@ -497,7 +498,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, /* * When hardware encryption is supported, and this frame * is to be encrypted, we should strip the IV/EIV data from - * the frame so we can provide it to the driver seperately. + * the frame so we can provide it to the driver separately. */ if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c index 111c0ff5c6c..fc98063de71 100644 --- a/drivers/net/wireless/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/rt2x00/rt2x00soc.c @@ -28,6 +28,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "rt2x00.h" #include "rt2x00soc.h" diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 0a751e73aa0..f9a7f8b1741 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -25,6 +25,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/usb.h> #include <linux/bug.h> diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index ac69dbe5719..b9885981f3a 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -30,6 +30,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> @@ -476,7 +477,7 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev, * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it - * to be provided seperately for the descriptor. + * to be provided separately for the descriptor. 
* rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 7ebe14b64fe..576ea9dd282 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -30,6 +30,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" @@ -339,7 +340,7 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it - * to be provided seperately for the descriptor. + * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. @@ -439,7 +440,7 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it - * to be provided seperately for the descriptor. + * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. @@ -1661,7 +1662,7 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry, /* * Hardware has stripped IV/EIV data from 802.11 frame during - * decryption. It has provided the data seperately but rt2x00lib + * decryption. It has provided the data separately but rt2x00lib * should decide if it should be reinserted. 
*/ rxdesc->flags |= RX_FLAG_IV_STRIPPED; diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 2b928ecf47b..6b46329b732 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/eeprom_93cx6.h> @@ -723,10 +724,10 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev, priv->rf->conf_erp(dev, info); } -static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count, - struct dev_addr_list *mc_list) +static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, + struct netdev_hw_addr_list *mc_list) { - return mc_count; + return netdev_hw_addr_list_count(mc_list); } static void rtl8180_configure_filter(struct ieee80211_hw *dev, diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 0fb850e0c65..738921fda02 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/usb.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/eeprom_93cx6.h> @@ -1193,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev, } static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev, - int mc_count, struct dev_addr_list *mc_list) + struct netdev_hw_addr_list *mc_list) { - return mc_count; + return netdev_hw_addr_list_count(mc_list); } static void rtl8187_configure_filter(struct ieee80211_hw *dev, diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c index beff084040b..91891f92807 100644 --- a/drivers/net/wireless/wl12xx/wl1251_acx.c +++ b/drivers/net/wireless/wl12xx/wl1251_acx.c @@ -1,6 +1,7 @@ #include "wl1251_acx.h" #include <linux/module.h> +#include <linux/slab.h> #include <linux/crc7.h> #include "wl1251.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c index acb334184d7..2545123931e 100644 --- a/drivers/net/wireless/wl12xx/wl1251_boot.c +++ b/drivers/net/wireless/wl12xx/wl1251_boot.c @@ -22,6 +22,7 @@ */ #include <linux/gpio.h> +#include <linux/slab.h> #include "wl1251_reg.h" #include "wl1251_boot.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c index 0320b478bb3..a37b30cef48 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c @@ -1,6 +1,7 @@ #include "wl1251_cmd.h" #include <linux/module.h> +#include <linux/slab.h> #include <linux/crc7.h> #include "wl1251.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c index 05e4d68eb4c..5e4465ac08f 100644 --- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c @@ -24,6 +24,7 @@ #include "wl1251_debugfs.h" #include <linux/skbuff.h> +#include <linux/slab.h> #include "wl1251.h" #include "wl1251_acx.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c index 5aad56ea715..b538bdd7b32 100644 --- a/drivers/net/wireless/wl12xx/wl1251_init.c +++ b/drivers/net/wireless/wl12xx/wl1251_init.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include "wl1251_init.h" #include "wl12xx_80211.h" diff 
--git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c index 7b8b21212bf..4d479708158 100644 --- a/drivers/net/wireless/wl12xx/wl1251_main.c +++ b/drivers/net/wireless/wl12xx/wl1251_main.c @@ -29,6 +29,7 @@ #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> +#include <linux/slab.h> #include "wl1251.h" #include "wl12xx_80211.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c index b56732226cc..6f229e0990f 100644 --- a/drivers/net/wireless/wl12xx/wl1251_rx.c +++ b/drivers/net/wireless/wl12xx/wl1251_rx.c @@ -23,6 +23,7 @@ */ #include <linux/skbuff.h> +#include <linux/gfp.h> #include <net/mac80211.h> #include "wl1251.h" diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c index df2ff8bc8ef..e81474203a2 100644 --- a/drivers/net/wireless/wl12xx/wl1251_spi.c +++ b/drivers/net/wireless/wl12xx/wl1251_spi.c @@ -23,6 +23,7 @@ #include <linux/irq.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/crc7.h> #include <linux/spi/spi.h> #include <linux/spi/wl12xx.h> diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c index 1a6b2ec1db5..2ad086efe06 100644 --- a/drivers/net/wireless/wl12xx/wl1271_acx.c +++ b/drivers/net/wireless/wl12xx/wl1271_acx.c @@ -27,6 +27,7 @@ #include <linux/platform_device.h> #include <linux/crc7.h> #include <linux/spi/spi.h> +#include <linux/slab.h> #include "wl1271.h" #include "wl12xx_80211.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c index f16d15bd564..8087dc17f29 100644 --- a/drivers/net/wireless/wl12xx/wl1271_boot.c +++ b/drivers/net/wireless/wl12xx/wl1271_boot.c @@ -22,6 +22,7 @@ */ #include <linux/gpio.h> +#include <linux/slab.h> #include "wl1271_acx.h" #include "wl1271_reg.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c index b19090334bc..6b5ba8ec94c 100644 --- a/drivers/net/wireless/wl12xx/wl1271_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c @@ -27,6 +27,7 @@ #include <linux/spi/spi.h> #include <linux/etherdevice.h> #include <linux/ieee80211.h> +#include <linux/slab.h> #include "wl1271.h" #include "wl1271_reg.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c index 3c0f5b1ac27..c239ef4d0b8 100644 --- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c +++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c @@ -24,6 +24,7 @@ #include "wl1271_debugfs.h" #include <linux/skbuff.h> +#include <linux/slab.h> #include "wl1271.h" #include "wl1271_acx.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c index b880382cf15..4447af1557f 100644 --- a/drivers/net/wireless/wl12xx/wl1271_init.c +++ b/drivers/net/wireless/wl12xx/wl1271_init.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include "wl1271_init.h" #include "wl12xx_80211.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c index 283d5dade1a..814f300c3f1 100644 --- a/drivers/net/wireless/wl12xx/wl1271_main.c +++ b/drivers/net/wireless/wl12xx/wl1271_main.c @@ -30,6 +30,7 @@ #include <linux/vmalloc.h> #include <linux/inetdevice.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "wl1271.h" #include "wl12xx_80211.h" @@ -1304,10 +1305,11 @@ struct wl1271_filter_params { u8 
mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; }; -static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, - struct dev_addr_list *mc_list) +static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { struct wl1271_filter_params *fp; + struct netdev_hw_addr *ha; struct wl1271 *wl = hw->priv; int i; @@ -1321,21 +1323,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, } /* update multicast filtering parameters */ - fp->enabled = true; - if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) { - mc_count = 0; - fp->enabled = false; - } - fp->mc_list_length = 0; - for (i = 0; i < mc_count; i++) { - if (mc_list->da_addrlen == ETH_ALEN) { + if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { + fp->enabled = false; + } else { + fp->enabled = true; + netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(fp->mc_list[fp->mc_list_length], - mc_list->da_addr, ETH_ALEN); + ha->addr, ETH_ALEN); fp->mc_list_length++; - } else - wl1271_warning("Unknown mc address length."); - mc_list = mc_list->next; + } } return (u64)(unsigned long)fp; diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c index ca442703d1a..57f4bfd959c 100644 --- a/drivers/net/wireless/wl12xx/wl1271_rx.c +++ b/drivers/net/wireless/wl12xx/wl1271_rx.c @@ -21,6 +21,8 @@ * */ +#include <linux/gfp.h> + #include "wl1271.h" #include "wl1271_acx.h" #include "wl1271_reg.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c index 7a7db011a79..5189b812f93 100644 --- a/drivers/net/wireless/wl12xx/wl1271_spi.c +++ b/drivers/net/wireless/wl12xx/wl1271_spi.c @@ -26,6 +26,7 @@ #include <linux/crc7.h> #include <linux/spi/spi.h> #include <linux/spi/wl12xx.h> +#include <linux/slab.h> #include "wl1271.h" #include "wl12xx_80211.h" diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c index 2401e6035d5..554deb4d024 100644 --- a/drivers/net/wireless/wl12xx/wl1271_testmode.c +++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c @@ -22,6 +22,7 @@ */ #include "wl1271_testmode.h" +#include <linux/slab.h> #include <net/genetlink.h> #include "wl1271.h" diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 6917286edca..1e2b684f8ed 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/usb.h> +#include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/wireless.h> @@ -875,7 +876,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev) static void zd1201_set_multicast(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; int i; @@ -883,8 +884,8 @@ static void zd1201_set_multicast(struct net_device *dev) return; i = 0; - netdev_for_each_mc_addr(mc, dev) - memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) + memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, netdev_mc_count(dev) * ETH_ALEN, 0); } diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index 7ca95c414fa..b2af3c549bb 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -25,6 +25,7 @@ #include 
<linux/kernel.h> #include <linux/errno.h> +#include <linux/slab.h> #include "zd_def.h" #include "zd_chip.h" diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index e24099613d9..b0b666019a9 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -22,6 +22,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/slab.h> #include <linux/usb.h> #include <linux/jiffies.h> #include <net/ieee80211_radiotap.h> @@ -374,7 +375,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, * zd_mac_tx_failed - callback for failed frames * @dev: the mac80211 wireless device * - * This function is called if a frame couldn't be successfully be + * This function is called if a frame couldn't be successfully * transferred. The first frame from the tx queue, will be selected and * reported as error to the upper layers. */ @@ -947,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work) } static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct zd_mac *mac = zd_hw_mac(hw); struct zd_mc_hash hash; - int i; + struct netdev_hw_addr *ha; zd_mc_clear(&hash); - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr); - zd_mc_add_addr(&hash, mclist->dmi_addr); - mclist = mclist->next; + netdev_hw_addr_list_for_each(ha, mc_list) { + dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr); + zd_mc_add_addr(&hash, ha->addr); } return hash.low | ((u64)hash.high << 32); diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c index 439799b8487..9e74eb1b67d 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c +++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c @@ -19,6 +19,7 @@ */ #include <linux/kernel.h> +#include <linux/slab.h> #include "zd_rf.h" #include "zd_usb.h" diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 442fc111732..d91ad1a612a 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include <linux/device.h> #include <linux/errno.h> +#include <linux/slab.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <linux/workqueue.h> diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a869b45d3d3..d504e2b6025 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -40,6 +40,7 @@ #include <linux/udp.h> #include <linux/moduleparam.h> #include <linux/mm.h> +#include <linux/slab.h> #include <net/ip.h> #include <xen/xen.h> diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c index 1a74594224b..e9381fe3baf 100644 --- a/drivers/net/xilinx_emaclite.c +++ b/drivers/net/xilinx_emaclite.c @@ -19,6 +19,7 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/io.h> +#include <linux/slab.h> #include <linux/of_device.h> #include <linux/of_platform.h> @@ -638,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev) } skb_put(skb, len); /* Tell the skb how much data we got */ - skb->dev = dev; /* Fill out required meta-data */ skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_NONE; diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index 389ba9df712..fdba9cb3a59 100644 --- a/drivers/net/xtsonic.c +++ 
b/drivers/net/xtsonic.c @@ -20,11 +20,11 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> +#include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> -#include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> @@ -33,6 +33,7 @@ #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/slab.h> #include <asm/io.h> #include <asm/pgtable.h> diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 7d4107f5eeb..efbff76a990 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c @@ -90,7 +90,6 @@ static int gx_fix; #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> -#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> @@ -1300,25 +1299,25 @@ static void set_rx_mode(struct net_device *dev) /* Too many to filter well, or accept all multicasts. */ iowrite16(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 hash_table[4]; int i; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit; /* Due to a bug in the early chip versions, multiple filter slots must be set for each address. */ if (yp->drv_flags & HasMulticastBug) { - bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); - bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); - bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } - bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } /* Copy the hash table to the chip. */ diff --git a/drivers/net/znet.c b/drivers/net/znet.c index def49d2ec69..dbfef8d70f2 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c @@ -88,6 +88,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> +#include <linux/slab.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/ioport.h> |
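
Most of the one-line #include changes in the hunks above look like fallout from the same tree-wide cleanup: <linux/slab.h> is no longer pulled in implicitly through other headers, so every file that calls the slab allocators has to include it directly, while files that only need GFP flags can use the lighter <linux/gfp.h>. A minimal sketch of which header covers which call; my_alloc_buffer() is an illustrative helper, not something from this patch:

        #include <linux/gfp.h>          /* GFP_KERNEL, GFP_ATOMIC, ... */
        #include <linux/slab.h>         /* kmalloc(), kzalloc(), kfree() */

        static void *my_alloc_buffer(size_t len)
        {
                /* kzalloc() is declared in slab.h; the GFP flag comes from
                 * gfp.h, which slab.h already includes. */
                return kzalloc(len, GFP_KERNEL);
        }

Files such as wl1251_rx.c above add only gfp.h, presumably because they need the GFP flags but not the slab allocators themselves.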
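
The zd1201_set_multicast() and yellowfin set_rx_mode() hunks follow the multicast-list conversion seen throughout this diff: the old struct dev_mc_list walk (dmi_addr, ->next) becomes a struct netdev_hw_addr iteration via netdev_for_each_mc_addr(). A hedged sketch of the new-style handler; my_write_mc_slot(), my_enable_allmulti() and MY_MC_SLOTS are made-up stand-ins for a driver's own filter programming, not names from this patch:

        #include <linux/netdevice.h>

        static void my_set_multicast(struct net_device *dev)
        {
                struct netdev_hw_addr *ha;      /* replaces struct dev_mc_list */
                int i = 0;

                /* Perfect filtering only fits a limited number of entries. */
                if (netdev_mc_count(dev) > MY_MC_SLOTS) {
                        my_enable_allmulti(dev);
                        return;
                }

                /* ha->addr replaces the old dmi->dmi_addr field. */
                netdev_for_each_mc_addr(ha, dev)
                        my_write_mc_slot(dev, i++, ha->addr);
        }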
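
The rtl8180, rtl8187 and zd1211rw hunks track the matching mac80211 interface change: the prepare_multicast callback now receives a struct netdev_hw_addr_list instead of a count plus a dev_addr_list chain. For hardware whose filter decision only depends on the number of entries, the whole callback collapses to the count helper, as the rtl818x drivers above show; a minimal sketch (my_prepare_multicast is an illustrative name):

        #include <linux/netdevice.h>
        #include <net/mac80211.h>

        static u64 my_prepare_multicast(struct ieee80211_hw *hw,
                                        struct netdev_hw_addr_list *mc_list)
        {
                /* No per-address hardware filter: report the entry count so
                 * the configure_filter callback can decide on allmulti. */
                return netdev_hw_addr_list_count(mc_list);
        }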
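
Where the hardware does keep a per-address filter, the new list is walked with netdev_hw_addr_list_for_each(), as in the wl1271 and zd_mac rewrites above: each ha->addr is copied into a driver-owned structure, or the filter is disabled outright when the list is larger than the hardware can hold. A sketch along the lines of the wl1271 change; struct my_mc_filter and MY_MC_MAX are illustrative names, not the driver's:

        #include <linux/etherdevice.h>
        #include <linux/netdevice.h>
        #include <linux/string.h>

        #define MY_MC_MAX       8       /* assumed firmware limit */

        struct my_mc_filter {
                bool enabled;
                u8 count;
                u8 addr[MY_MC_MAX][ETH_ALEN];
        };

        static void my_fill_mc_filter(struct my_mc_filter *fp,
                                      struct netdev_hw_addr_list *mc_list)
        {
                struct netdev_hw_addr *ha;

                fp->count = 0;
                if (netdev_hw_addr_list_count(mc_list) > MY_MC_MAX) {
                        /* Too many groups for the hardware: let all through. */
                        fp->enabled = false;
                        return;
                }

                fp->enabled = true;
                netdev_hw_addr_list_for_each(ha, mc_list)
                        memcpy(fp->addr[fp->count++], ha->addr, ETH_ALEN);
        }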