Diffstat (limited to 'drivers/net/ethernet')
302 files changed, 9620 insertions, 6525 deletions
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index f00c76377b4..65b735d4a6a 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -35,7 +35,7 @@ config EL3
 
 config 3C515
 	tristate "3c515 ISA \"Fast EtherLink\""
-	depends on (ISA || EISA) && ISA_DMA_API
+	depends on ISA && ISA_DMA_API
 	---help---
 	  If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
 	  network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
 	select MII
 	---help---
 	  This option enables driver support for a large number of 10Mbps and
-	  10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+	  10/100Mbps EISA, PCI and Cardbus 3Com network cards:
 
 	  "Vortex"    (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
 	  "Boomerang" (EtherLink XL 3c900 or 3c905)             PCI
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 144942f6372..465cc7108d8 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);
 }
 
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index ef325ffa1b5..2923c51bb35 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -28,42 +28,42 @@ extern int ei_debug;
 #endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
 #endif
 
 /* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops ei_netdev_ops;
 
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
 static inline struct net_device *alloc_ei_netdev(void)
 {
 	return __alloc_ei_netdev(0);
 }
 
 /* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops eip_netdev_ops;
 
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
 static inline struct net_device *alloc_eip_netdev(void)
 {
 	return __alloc_eip_netdev(0);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index becef25fa19..0988811f4e4 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -146,13 +146,6 @@ config PCMCIA_PCNET
 	  To compile this driver as a module, choose M here: the module will be
 	  called pcnet_cs.  If unsure, say N.
 
-config NE_H8300
-	tristate "NE2000 compatible support for H8/300"
-	depends on H8300H_AKI3068NET || H8300H_H8MAX
-	---help---
-	  Say Y here if you want to use the NE2000 compatible
-	  controller on the Renesas H8/300 processor.
-
 config STNIC
 	tristate "National DP83902AV support"
 	depends on SUPERH
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 588954a79b2..ff3b3189418 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_HYDRA) += hydra.o 8390.o
 obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
 obj-$(CONFIG_NE2000) += ne.o 8390p.o
 obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
 obj-$(CONFIG_STNIC) += stnic.o 8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index f92f001551d..36fa577970b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
 		for (i = 0; i < 16; i++)
 			SA_prom[i] = SA_prom[i+i];
 
-		memcpy(dev->dev_addr, SA_prom, 6);
+		memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
 	}
 
 #ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
deleted file mode 100644
index 7fc28f2d28a..00000000000
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ /dev/null
@@ -1,684 +0,0 @@
-/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
-/*
-    original ne.c
-    Written 1992-94 by Donald Becker.
-
-    Copyright 1993 United States Government as represented by the
-    Director, National Security Agency.
-
-    This software may be used and distributed according to the terms
-    of the GNU General Public License, incorporated herein by reference.
-
-    The author may be reached as becker@scyld.com, or C/O
-    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
-    H8/300 modified
-    Yoshinori Sato <ysato@users.sourceforge.jp>
-*/
-
-static const char version1[] =
-"ne-h8300.c:v1.00 2004/04/11 ysato\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define EI_SHIFT(x)	(ei_local->reg_offset[x])
-
-#include "8390.h"
-
-#define DRV_NAME "ne-h8300"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE	0x40 */
-
-/* A zero-terminated list of I/O addresses to be probed at boot. */
-
-/* ---- No user-serviceable parts below ---- */
-
-static const char version[] =
-    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include "lib8390.c"
-
-#define NE_BASE		(dev->base_addr)
-#define NE_CMD		0x00
-#define NE_DATAPORT	(ei_status.word16?0x20:0x10)	/* NatSemi-defined port window offset. */
-#define NE_RESET	(ei_status.word16?0x3f:0x1f)	/* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT	(ei_status.word16?0x40:0x20)
-
-#define NESM_START_PG	0x40	/* First page of TX buffer */
-#define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
-
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-			  int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
-			  struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
-		const unsigned char *buf, const int start_page);
-
-
-static u32 reg_offset[16];
-
-static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	int i;
-	unsigned char bus_width;
-
-	bus_width = *(volatile unsigned char *)ABWCR;
-	bus_width &= 1 << ((base_addr >> 21) & 7);
-
-	for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
-		if (bus_width == 0)
-			reg_offset[i] = i * 2 + 1;
-		else
-			reg_offset[i] = i;
-
-	ei_local->reg_offset = reg_offset;
-	return 0;
-}
-
-static int __initdata h8300_ne_count = 0;
-#ifdef CONFIG_H8300H_H8MAX
-static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
-static int h8300_ne_irq[] = {EXT_IRQ4};
-#endif
-#ifdef CONFIG_H8300H_AKI3068NET
-static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
-static int h8300_ne_irq[] = {EXT_IRQ5};
-#endif
-
-static inline int init_dev(struct net_device *dev)
-{
-	if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
-		dev->base_addr = h8300_ne_base[h8300_ne_count];
-		dev->irq = h8300_ne_irq[h8300_ne_count];
-		h8300_ne_count++;
-		return 0;
-	} else
-		return -ENODEV;
-}
-
-/* Probe for various non-shared-memory ethercards.
-
-   NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
-   buffer memory space.  NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
-   the SAPROM, while other supposed NE2000 clones must be detected by their
-   SA prefix.
-
-   Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
-   mode results in doubled values, which can be detected and compensated for.
-
-   The probe is also responsible for initializing the card and filling
-   in the 'dev' and 'ei_status' structures.
-
-   We use the minimum memory size for some ethercard product lines, iff we can't
-   distinguish models.  You can increase the packet buffer size by setting
-   PACKETBUF_MEMSIZE.  Reported Cabletron packet buffer locations are:
-	E1010   starts at 0x100 and ends at 0x2000.
-	E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
-	E2010   starts at 0x100 and ends at 0x4000.
-	E2010-x starts at 0x100 and ends at 0xffff.
-*/
-
-static int __init do_ne_probe(struct net_device *dev)
-{
-	unsigned int base_addr = dev->base_addr;
-
-	/* First check any supplied i/o locations. User knows best. <cough> */
-	if (base_addr > 0x1ff)	/* Check a single specified location. */
-		return ne_probe1(dev, base_addr);
-	else if (base_addr != 0)	/* Don't probe at all. */
-		return -ENXIO;
-
-	return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
-	struct net_device *dev = ____alloc_ei_netdev(0);
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (init_dev(dev))
-		return ERR_PTR(-ENODEV);
-
-	sprintf(dev->name, "eth%d", unit);
-	netdev_boot_setup_check(dev);
-
-	err = init_reg_offset(dev, dev->base_addr);
-	if (err)
-		goto out;
-
-	err = do_ne_probe(dev);
-	if (err)
-		goto out;
-	return dev;
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ne_netdev_ops = {
-	.ndo_open		= ne_open,
-	.ndo_stop		= ne_close,
-
-	.ndo_start_xmit		= __ei_start_xmit,
-	.ndo_tx_timeout		= __ei_tx_timeout,
-	.ndo_get_stats		= __ei_get_stats,
-	.ndo_set_rx_mode	= __ei_set_multicast_list,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_change_mtu		= eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= __ei_poll,
-#endif
-};
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
-	int i;
-	unsigned char SA_prom[16];
-	int wordlength = 2;
-	const char *name = NULL;
-	int start_page, stop_page;
-	int reg0, ret;
-	static unsigned version_printed;
-	struct ei_device *ei_local = netdev_priv(dev);
-	unsigned char bus_width;
-
-	if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
-		return -EBUSY;
-
-	reg0 = inb_p(ioaddr);
-	if (reg0 == 0xFF) {
-		ret = -ENODEV;
-		goto err_out;
-	}
-
-	/* Do a preliminary verification that we have a 8390. */
-	{
-		int regd;
-		outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
-		regd = inb_p(ioaddr + EI_SHIFT(0x0d));
-		outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
-		outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
-		inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
-		if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
-			outb_p(reg0, ioaddr + EI_SHIFT(0));
-			outb_p(regd, ioaddr + EI_SHIFT(0x0d));	/* Restore the old values. */
-			ret = -ENODEV;
-			goto err_out;
-		}
-	}
-
-	if (ei_debug && version_printed++ == 0)
-		printk(KERN_INFO "%s", version1);
-
-	printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
-
-	/* Read the 16 bytes of station address PROM.
-	   We must first initialize registers, similar to NS8390_init(eifdev, 0).
-	   We can't reliably read the SAPROM address without this.
-	   (I learned the hard way!). */
-	{
-		struct {unsigned char value, offset; } program_seq[] =
-		{
-			{E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
-			{0x48,	EN0_DCFG},	/* Set byte-wide (0x48) access. */
-			{0x00,	EN0_RCNTLO},	/* Clear the count regs. */
-			{0x00,	EN0_RCNTHI},
-			{0x00,	EN0_IMR},	/* Mask completion irq. */
-			{0xFF,	EN0_ISR},
-			{E8390_RXOFF, EN0_RXCR},	/* 0x20  Set to monitor */
-			{E8390_TXOFF, EN0_TXCR},	/* 0x02  and loopback mode. */
-			{32,	EN0_RCNTLO},
-			{0x00,	EN0_RCNTHI},
-			{0x00,	EN0_RSARLO},	/* DMA starting at 0x0000. */
-			{0x00,	EN0_RSARHI},
-			{E8390_RREAD+E8390_START, E8390_CMD},
-		};
-
-		for (i = 0; i < ARRAY_SIZE(program_seq); i++)
-			outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
-	}
-	bus_width = *(volatile unsigned char *)ABWCR;
-	bus_width &= 1 << ((ioaddr >> 21) & 7);
-	ei_status.word16 = (bus_width == 0); /* temporary setting */
-	for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
-		SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
-		inb_p(ioaddr + NE_DATAPORT); /* dummy read */
-	}
-
-	start_page = NESM_START_PG;
-	stop_page = NESM_STOP_PG;
-
-	if (bus_width)
-		wordlength = 1;
-	else
-		outb_p(0x49, ioaddr + EN0_DCFG);
-
-	/* Set up the rest of the parameters. */
-	name = (wordlength == 2) ? "NE2000" : "NE1000";
-
-	if (! dev->irq) {
-		printk(" failed to detect IRQ line.\n");
-		ret = -EAGAIN;
-		goto err_out;
-	}
-
-	/* Snarf the interrupt now.  There's no point in waiting since we cannot
-	   share and the board will usually be enabled. */
-	ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
-	if (ret) {
-		printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
-		goto err_out;
-	}
-
-	dev->base_addr = ioaddr;
-
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = SA_prom[i];
-	printk(" %pM\n", dev->dev_addr);
-
-	printk("%s: %s found at %#x, using IRQ %d.\n",
-		dev->name, name, ioaddr, dev->irq);
-
-	ei_status.name = name;
-	ei_status.tx_start_page = start_page;
-	ei_status.stop_page = stop_page;
-	ei_status.word16 = (wordlength == 2);
-
-	ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
-	/* Allow the packet buffer size to be overridden by know-it-alls. */
-	ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
-	ei_status.reset_8390 = &ne_reset_8390;
-	ei_status.block_input = &ne_block_input;
-	ei_status.block_output = &ne_block_output;
-	ei_status.get_8390_hdr = &ne_get_8390_hdr;
-	ei_status.priv = 0;
-
-	dev->netdev_ops = &ne_netdev_ops;
-
-	__NS8390_init(dev, 0);
-
-	ret = register_netdev(dev);
-	if (ret)
-		goto out_irq;
-	return 0;
-out_irq:
-	free_irq(dev->irq, dev);
-err_out:
-	release_region(ioaddr, NE_IO_EXTENT);
-	return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
-	__ei_open(dev);
-	return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
-	if (ei_debug > 1)
-		printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-	__ei_close(dev);
-	return 0;
-}
-
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
-	unsigned long reset_start_time = jiffies;
-	struct ei_device *ei_local = netdev_priv(dev);
-
-	if (ei_debug > 1)
-		printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
-	/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
-	outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
-	ei_status.txing = 0;
-	ei_status.dmaing = 0;
-
-	/* This check _should_not_ be necessary, omit eventually. */
-	while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
-		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-			printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
-			break;
-		}
-	outb_p(ENISR_RESET, NE_BASE + EN0_ISR);	/* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
-   we don't need to be concerned with ring wrap as the header will be at
-   the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
-	if (ei_status.dmaing)
-	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
-			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-
-	ei_status.dmaing |= 0x01;
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-	outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
-	outb_p(0, NE_BASE + EN0_RCNTHI);
-	outb_p(0, NE_BASE + EN0_RSARLO);		/* On page boundary */
-	outb_p(ring_page, NE_BASE + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-
-	if (ei_status.word16) {
-		int len;
-		unsigned short *p = (unsigned short *)hdr;
-		for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
-			*p++ = inw(NE_BASE + NE_DATAPORT);
-	} else
-		insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
-	outb_p(ENISR_RDC, NE_BASE + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-
-	le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-#ifdef NE_SANITY_CHECK
-	int xfer_count = count;
-#endif
-	char *buf = skb->data;
-
-	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
-	if (ei_status.dmaing)
-	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
-			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-	ei_status.dmaing |= 0x01;
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-	outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-	outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
-	outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
-	outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-	if (ei_status.word16)
-	{
-		int len;
-		unsigned short *p = (unsigned short *)buf;
-		for (len = count>>1; len > 0; len--)
-			*p++ = inw(NE_BASE + NE_DATAPORT);
-		if (count & 0x01)
-		{
-			buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
-			xfer_count++;
-#endif
-		}
-	} else {
-		insb(NE_BASE + NE_DATAPORT, buf, count);
-	}
-
-#ifdef NE_SANITY_CHECK
-	/* This was for the ALPHA version only, but enough people have
-	   been encountering problems so it is still here.  If you see
-	   this message you either 1) have a slightly incompatible clone
-	   or 2) have noise/speed problems with your bus. */
-
-	if (ei_debug > 1)
-	{
-		/* DMA termination address check... */
-		int addr, tries = 20;
-		do {
-			/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
-			   -- it's broken for Rx on some cards! */
-			int high = inb_p(NE_BASE + EN0_RSARHI);
-			int low = inb_p(NE_BASE + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if (((ring_offset + xfer_count) & 0xff) == low)
-				break;
-		} while (--tries > 0);
-		if (tries <= 0)
-			printk(KERN_WARNING "%s: RX transfer address mismatch,"
-				"%#4.4x (expected) vs. %#4.4x (actual).\n",
-				dev->name, ring_offset + xfer_count, addr);
-	}
-#endif
-	outb_p(ENISR_RDC, NE_BASE + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
-		const unsigned char *buf, const int start_page)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
-	int retries = 0;
-#endif
-
-	/* Round the count up for word writes.  Do we need to do this?
-	   What effect will an odd byte count have on the 8390?
-	   I should check someday. */
-
-	if (ei_status.word16 && (count & 0x01))
-		count++;
-
-	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
-	if (ei_status.dmaing)
-	{
-		printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
-			"[DMAstat:%d][irqlock:%d]\n",
-			dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-	ei_status.dmaing |= 0x01;
-	/* We should already be in page 0, but to be safe... */
-	outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
-	/* Handle the read-before-write bug the same way as the
-	   Crynwr packet driver -- the NatSemi method doesn't work.
-	   Actually this doesn't always work either, but if you have
-	   problems with your NEx000 this is better than nothing! */
-
-	outb_p(0x42, NE_BASE + EN0_RCNTLO);
-	outb_p(0x00, NE_BASE + EN0_RCNTHI);
-	outb_p(0x42, NE_BASE + EN0_RSARLO);
-	outb_p(0x00, NE_BASE + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-	/* Make certain that the dummy read has occurred. */
-	udelay(6);
-#endif
-
-	outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
-
-	/* Now the normal output. */
-	outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-	outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
-	outb_p(0x00, NE_BASE + EN0_RSARLO);
-	outb_p(start_page, NE_BASE + EN0_RSARHI);
-
-	outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
-	if (ei_status.word16) {
-		int len;
-		unsigned short *p = (unsigned short *)buf;
-		for (len = count>>1; len > 0; len--)
-			outw(*p++, NE_BASE + NE_DATAPORT);
-	} else {
-		outsb(NE_BASE + NE_DATAPORT, buf, count);
-	}
-
-	dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
-	/* This was for the ALPHA version only, but enough people have
-	   been encountering problems so it is still here. */
-
-	if (ei_debug > 1)
-	{
-		/* DMA termination address check... */
-		int addr, tries = 20;
-		do {
-			int high = inb_p(NE_BASE + EN0_RSARHI);
-			int low = inb_p(NE_BASE + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if ((start_page << 8) + count == addr)
-				break;
-		} while (--tries > 0);
-
-		if (tries <= 0)
-		{
-			printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
-				"%#4.4x (expected) vs. %#4.4x (actual).\n",
-				dev->name, (start_page << 8) + count, addr);
-			if (retries++ == 0)
-				goto retry;
-		}
-	}
-#endif
-
-	while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
-		if (time_after(jiffies, dma_start + 2*HZ/100)) {	/* 20ms */
-			printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
-			ne_reset_8390(dev);
-			__NS8390_init(dev,1);
-			break;
-		}
-
-	outb_p(ENISR_RDC, NE_BASE + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS	1	/* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS];	/* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int init_module(void)
-{
-	int this_dev, found = 0;
-	int err;
-
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = ____alloc_ei_netdev(0);
-		if (!dev)
-			break;
-		if (io[this_dev]) {
-			dev->irq = irq[this_dev];
-			dev->mem_end = bad[this_dev];
-			dev->base_addr = io[this_dev];
-		} else {
-			dev->base_addr = h8300_ne_base[this_dev];
-			dev->irq = h8300_ne_irq[this_dev];
-		}
-		err = init_reg_offset(dev, dev->base_addr);
-		if (!err) {
-			if (do_ne_probe(dev) == 0) {
-				dev_ne[found++] = dev;
-				continue;
-			}
-		}
-		free_netdev(dev);
-		if (found)
-			break;
-		if (io[this_dev] != 0)
-			printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
-		else
-			printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
-		return -ENXIO;
-	}
-	if (found)
-		return 0;
-	return -ENODEV;
-}
-
-void cleanup_module(void)
-{
-	int this_dev;
-
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = dev_ne[this_dev];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 92201080e07..fc14a85e4d5 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -389,9 +389,7 @@ err_out_free_netdev:
 	free_netdev (dev);
 err_out_free_res:
 	release_region (ioaddr, NE_IO_EXTENT);
-	pci_set_drvdata (pdev, NULL);
 	return -ENODEV;
-
 }
 
 /*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
 	release_region(dev->base_addr, NE_IO_EXTENT);
 	free_netdev(dev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8b04bfc20cf..171d73c1d3c 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
 	return 0;
 
 err_out_cleardev:
-	pci_set_drvdata(pdev, NULL);
 	iounmap(base);
 err_out_free_res:
 	pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
 		iounmap(np->base);
 	pci_release_regions(pdev);
 
-	pci_set_drvdata(pdev, NULL);
 	free_netdev(dev);			/* Will also free np!! */
 }
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 7a07ee07906..6dec86ac97c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -104,6 +104,6 @@ struct bfin_mac_local {
 #endif
 };
 
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
 
 #endif
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a79d7..46dfb1378c1 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
 	if (netif_msg_ifup(db))
 		dev_dbg(db->dev, "enabling %s\n", dev->name);
 
-	if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
-			     0, dev->name, dev))
+	if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
 		return -EAGAIN;
 
 	/* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
 
 	emac_shutdown(ndev);
 
+	free_irq(ndev->irq, ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 0a5837b9642..ae33a99bf47 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -242,13 +242,13 @@ struct lance_private
 #define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
 
 /* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
 #endif
 
 #endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 1b1429d5d5c..d042511bdc1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
 		free_netdev(dev);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
 
 err_disable_pdev:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 
 }
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 10ceca523fc..e07ce5ff2d4 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
 	switch( lp->cardtype ) {
 	  case OLD_RIEBL:
 		/* No ethernet address! (Set some default address) */
-		memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+		memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
 		break;
 	  case NEW_RIEBL:
-		lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+		lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
 		break;
 	  case PAM_CARD:
 		i = IO->eeprom;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 91d52b49584..427c148bb64 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
 		aup->phy1_search_mac0 = 1;
 	} else {
 		if (is_valid_ether_addr(pd->mac)) {
-			memcpy(dev->dev_addr, pd->mac, 6);
+			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
 		} else {
 			/* Set a random MAC since no valid provided by platform_data. */
 			eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 94edc9c6fbb..57397295887 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
 	}
 
 	clen = len & 1;
-	rtp = tp;
-	rfp = fp;
+	rtp = (unsigned char *)tp;
+	rfp = (const unsigned char *)fp;
 	while (clen--) {
 		*rtp++ = *rfp++;
 	}
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
 		 * do the rest, if any.
 		 */
 		clen = len & 15;
-		rtp = (unsigned char *) tp;
-		rfp = (unsigned char *) fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 		while (clen--) {
 			*rtp++ = *rfp++;
 		}
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
 
 	clen = len & 1;
-	rtp = tp;
-	rfp = fp;
+	rtp = (unsigned char *)tp;
+	rfp = (const unsigned char *)fp;
 	while (clen--) {
 		*rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
 		 * do the rest, if any.
 		 */
 		clen = len & 15;
-		rtp = (unsigned char *) tp;
-		rfp = (unsigned char *) fp;
+		rtp = (unsigned char *)tp;
+		rfp = (const unsigned char *)fp;
 		while (clen--) {
 			*rtp++ = *rfp++;
 		}
@@ -725,7 +725,6 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 
-	clear_ioasic_dma_irq(irq);
 	printk(KERN_ERR "%s: DMA error\n", dev->name);
 	return IRQ_HANDLED;
 }
@@ -812,7 +811,7 @@ static int lance_open(struct net_device *dev)
 	if (lp->dma_irq >= 0) {
 		unsigned long flags;
 
-		if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+		if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
 				"lance error", dev)) {
 			free_irq(dev->irq, dev);
 			printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 5c728436b85..256f590f6bb 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
 	int i;
 
 	if (dev->irq == 0 ||
-		request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
 		return -EAGAIN;
 	}
 
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 2d8e2881977..38492e0b704 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 				pr_cont(" warning: CSR address invalid,\n");
 				pr_info("    using instead PROM address of");
 			}
-			memcpy(dev->dev_addr, promaddr, 6);
+			memcpy(dev->dev_addr, promaddr, ETH_ALEN);
 		}
 	}
 
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
 				    lp->init_block, lp->init_dma_addr);
 		free_netdev(dev);
 		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
 	}
 }
 
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a597b766f08..daae0e01625 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
 	if (skb != NULL) {
 		data = skb_put(skb, ETHERMINPACKET);
 		memset(data, 0, ETHERMINPACKET);
-		memcpy(data, dev->dev_addr, 6);
-		memcpy(data+6, dev->dev_addr, 6);
+		memcpy(data, dev->dev_addr, ETH_ALEN);
+		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
 		bmac_transmit_packet(skb, dev);
 	}
 	spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 4ce8ceb6220..58a200df4c3 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -211,6 +211,7 @@ static int mace_probe(struct platform_device *pdev)
 	mp = netdev_priv(dev);
 
 	mp->device = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	dev->base_addr = (u32)MACE_BASE;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 9e160148726..b2ffad1304d 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -628,12 +628,12 @@ static const struct net_device_ops arc_emac_netdev_ops = {
 
 static int arc_emac_probe(struct platform_device *pdev)
 {
-	struct resource res_regs, res_irq;
+	struct resource res_regs;
 	struct device_node *phy_node;
 	struct arc_emac_priv *priv;
 	struct net_device *ndev;
 	const char *mac_addr;
-	unsigned int id, clock_frequency;
+	unsigned int id, clock_frequency, irq;
 	int err;
 
 	if (!pdev->dev.of_node)
@@ -661,8 +661,8 @@ static int arc_emac_probe(struct platform_device *pdev)
 	}
 
 	/* Get IRQ from device tree */
-	err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq);
-	if (!err) {
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq) {
 		dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
 		return -ENODEV;
 	}
@@ -671,6 +671,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	if (!ndev)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -711,7 +712,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	ndev->irq = res_irq.start;
+	ndev->irq = irq;
 	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
 
 	/* Register interrupt handler for device */
@@ -725,10 +726,10 @@ static int arc_emac_probe(struct platform_device *pdev)
 
 	/* Get MAC address from device tree */
 	mac_addr = of_get_mac_address(pdev->dev.of_node);
 
-	if (!mac_addr || !is_valid_ether_addr(mac_addr))
-		eth_hw_addr_random(ndev);
-	else
+	if (mac_addr)
 		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+	else
+		eth_hw_addr_random(ndev);
 
 	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index fc95b235e21..c3c4c266b84 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
 
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 	free_netdev(alx->dev);
 }
@@ -1389,6 +1388,9 @@ static int alx_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct alx_hw *hw = &alx->hw;
+
+	alx_reset_phy(hw);
 
 	if (!netif_running(alx->dev))
 		return 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 0f0556526ba..7f9369a3b37 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -600,7 +600,7 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 3ef7092e3f1..1cda49a28f7 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
 	int i;
-	int ret = false;
+	bool ret = false;
 	u32 otp_ctrl_data;
 	u32 control;
 	u32 data;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e..1b0fe2d04a0 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -499,10 +499,10 @@ struct atl1e_adapter {
 extern char atl1e_driver_name[];
 extern char atl1e_driver_version[];
 
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1966444590f..7a73f3a9fcb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
 	}
 }
 
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+	if (features & NETIF_F_RXALL) {
+		/* enable RX of ALL frames */
+		*mac_ctrl_data |= MAC_CTRL_DBG;
+	} else {
+		/* disable RX of ALL frames */
+		*mac_ctrl_data &= ~MAC_CTRL_DBG;
+	}
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+	netdev_features_t features)
+{
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+	u32 mac_ctrl_data = 0;
+
+	netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+	atl1e_irq_disable(adapter);
+	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+	__atl1e_rx_mode(features, &mac_ctrl_data);
+	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+	atl1e_irq_enable(adapter);
+}
+
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		atl1e_vlan_mode(netdev, features);
 
+	if (changed & NETIF_F_RXALL)
+		atl1e_rx_mode(netdev, features);
+
+
 	return 0;
 }
 
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
 		value |= MAC_CTRL_PROMIS_EN;
 	if (netdev->flags & IFF_ALLMULTI)
 		value |= MAC_CTRL_MC_ALL_EN;
-
+	if (netdev->features & NETIF_F_RXALL)
+		value |= MAC_CTRL_DBG;
 	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }
 
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 			rx_page_desc[que].rx_nxseq++;
 
 			/* error packet */
-			if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+			if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+			    !(netdev->features & NETIF_F_RXALL)) {
 				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
 					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
 					RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 			}
 
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-					RRS_PKT_SIZE_MASK) - 4; /* CRC */
+					RRS_PKT_SIZE_MASK);
+			if (likely(!(netdev->features & NETIF_F_RXFCS)))
+				packet_size -= 4; /* CRC */
+
 			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 			if (skb == NULL)
 				goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
 			    NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features = netdev->hw_features | NETIF_F_LLTX |
 			   NETIF_F_HW_VLAN_CTAG_TX;
-
+	/* not enabled by default */
+	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
index 3ebe19f7242..2f27d4c4c3a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.h
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -42,7 +42,7 @@
 #include "atlx.h"
 
 #ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
 #endif
 
 #define PCI_COMMAND_REGISTER PCI_COMMAND
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9b017d9c58e..90e54d5488d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
 static void b44_tx(struct b44 *bp)
 {
 	u32 cur, cons;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 	cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
 				 skb->len,
 				 DMA_TO_DEVICE);
 		rp->skb = NULL;
+
+		bytes_compl += skb->len;
+		pkts_compl++;
+
 		dev_kfree_skb_irq(skb);
 	}
 
+	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
 	bp->tx_cons = cons;
 	if (netif_queue_stopped(bp->dev) &&
 	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (bp->flags & B44_FLAG_REORDER_BUG)
 		br32(bp, B44_DMATX_PTR);
 
+	netdev_sent_queue(dev, skb->len);
+
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
 	val = br32(bp, B44_ENET_CTRL);
 	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+	netdev_reset_queue(bp->dev);
 }
 
 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
 	 * valid PHY address. */
 	bp->phy_addr &= 0x1F;
 
-	memcpy(bp->dev->dev_addr, addr, 6);
+	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
 
 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
 		pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
 		goto err_out_free_dev;
 	}
 
-	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
-	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 249468f9536..e2aa09ce6af 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 	dma_desc->ctl0 = cpu_to_le32(ctl0);
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+	netdev_sent_queue(net_dev, skb->len);
+
 	wmb();
 
 	/* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 	struct device *dma_dev = bgmac->core->dma_dev;
 	int empty_slot;
 	bool freed = false;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	/* The last slot that hardware didn't consume yet */
 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 					 slot->skb->len, DMA_TO_DEVICE);
 			slot->dma_addr = 0;
 
+			bytes_compl += slot->skb->len;
+			pkts_compl++;
+
 			/* Free memory! :) */
 			dev_kfree_skb(slot->skb);
 			slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 		freed = true;
 	}
 
+	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
 	if (freed && netif_queue_stopped(bgmac->net_dev))
 		netif_wake_queue(bgmac->net_dev);
 }
@@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
 				     struct bgmac_slot_info *slot)
 {
 	struct device *dma_dev = bgmac->core->dma_dev;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
 	struct bgmac_rx_header *rx;
 
 	/* Alloc skb */
-	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-	if (!slot->skb)
+	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+	if (!skb)
 		return -ENOMEM;
 
 	/* Poison - if everything goes fine, hardware will overwrite it */
-	rx = (struct bgmac_rx_header *)slot->skb->data;
+	rx = (struct bgmac_rx_header *)skb->data;
 	rx->len = cpu_to_le16(0xdead);
 	rx->flags = cpu_to_le16(0xbeef);
 
 	/* Map skb for the DMA */
-	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
-					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+	dma_addr = dma_map_single(dma_dev, skb->data,
+				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dma_dev, dma_addr)) {
 		bgmac_err(bgmac, "DMA mapping error\n");
+		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
+
+	/* Update the slot */
+	slot->skb = skb;
+	slot->dma_addr = dma_addr;
+
 	if (slot->dma_addr & 0xC0000000)
 		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 	return 0;
 }
 
+static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
+				    struct bgmac_dma_ring *ring, int desc_idx)
+{
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
+	u32 ctl0 = 0, ctl1 = 0;
+
+	if (desc_idx == ring->num_slots - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+	/* Is there any BGMAC device that requires extension? */
+	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+	 * B43_DMA64_DCTL1_ADDREXT_MASK;
+	 */
+
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
+
 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			     int weight)
 {
@@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			struct device *dma_dev = bgmac->core->dma_dev;
 			struct bgmac_slot_info *slot = &ring->slots[ring->start];
 			struct sk_buff *skb = slot->skb;
-			struct sk_buff *new_skb;
 			struct bgmac_rx_header *rx;
 			u16 len, flags;
 
@@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			len = le16_to_cpu(rx->len);
 			flags = le16_to_cpu(rx->flags);
 
-			/* Check for poison and drop or pass the packet */
-			if (len == 0xdead && flags == 0xbeef) {
-				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
-					  ring->start);
-			} else {
+			do {
+				dma_addr_t old_dma_addr = slot->dma_addr;
+				int err;
+
+				/* Check for poison and drop or pass the packet */
+				if (len == 0xdead && flags == 0xbeef) {
+					bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+						  ring->start);
+					dma_sync_single_for_device(dma_dev,
+								   slot->dma_addr,
+								   BGMAC_RX_BUF_SIZE,
+								   DMA_FROM_DEVICE);
+					break;
+				}
+
 				/* Omit CRC. */
 				len -= ETH_FCS_LEN;
 
-				new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
-				if (new_skb) {
-					skb_put(new_skb, len);
-					skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
-									 new_skb->data,
-									 len);
-					skb_checksum_none_assert(skb);
-					new_skb->protocol =
-						eth_type_trans(new_skb, bgmac->net_dev);
-					netif_receive_skb(new_skb);
-					handled++;
-				} else {
-					bgmac->net_dev->stats.rx_dropped++;
-					bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+				/* Prepare new skb as replacement */
+				err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+				if (err) {
+					/* Poison the old skb */
+					rx->len = cpu_to_le16(0xdead);
+					rx->flags = cpu_to_le16(0xbeef);
+
+					dma_sync_single_for_device(dma_dev,
+								   slot->dma_addr,
+								   BGMAC_RX_BUF_SIZE,
+								   DMA_FROM_DEVICE);
+					break;
 				}
+				bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
 
-				/* Poison the old skb */
-				rx->len = cpu_to_le16(0xdead);
-				rx->flags = cpu_to_le16(0xbeef);
-			}
+				/* Unmap old skb, we'll pass it to the netif */
+				dma_unmap_single(dma_dev, old_dma_addr,
+						 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+				skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+				skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
 
-			/* Make it back accessible to the hardware */
-			dma_sync_single_for_device(dma_dev, slot->dma_addr,
-						   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+				skb_checksum_none_assert(skb);
+				skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+				netif_receive_skb(skb);
+				handled++;
+			} while (0);
 
 			if (++ring->start >= BGMAC_RX_RING_SLOTS)
 				ring->start = 0;
@@ -495,8 +543,6 @@ err_dma_free:
 static void bgmac_dma_init(struct bgmac *bgmac)
 {
 	struct bgmac_dma_ring *ring;
-	struct bgmac_dma_desc *dma_desc;
-	u32 ctl0, ctl1;
 	int i;
 
 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)
 		if (ring->unaligned)
 			bgmac_dma_rx_enable(bgmac, ring);
 
-		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
-		     j++, dma_desc++) {
-			ctl0 = ctl1 = 0;
-
-			if (j == ring->num_slots - 1)
-				ctl0 |= BGMAC_DESC_CTL0_EOT;
-			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
-			/* Is there any BGMAC device that requires extension? */
-			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
-			 * B43_DMA64_DCTL1_ADDREXT_MASK;
-			 */
-
-			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
-			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
-			dma_desc->ctl0 = cpu_to_le32(ctl0);
-			dma_desc->ctl1 = cpu_to_le32(ctl1);
-		}
+		for (j = 0; j < ring->num_slots; j++)
+			bgmac_dma_rx_setup_desc(bgmac, ring, j);
 
 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
 			    ring->index_base +
@@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
 	bgmac_miiconfig(bgmac);
 	bgmac_phy_init(bgmac);
 
+	netdev_reset_queue(bgmac->net_dev);
+
 	bgmac->int_status = 0;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e838a3f74b6..d9980ad00b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	if (!skb)
 		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
-	memcpy(packet, bp->dev->dev_addr, 6);
-	memset(packet + 6, 0x0, 8);
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	memset(packet + ETH_ALEN, 0x0, 8);
 	for (i = 14; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
 
@@ -8413,7 +8413,6 @@ err_out_release:
 
 err_out_disable:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 err_out:
 	return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
-	memcpy(dev->dev_addr, bp->mac_addr, 6);
+	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 		NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
 	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_free:
 	free_netdev(dev);
 	return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
 static int
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98b..a1f66e2c9a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW	0
 #define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW)	/* 4K */
-/* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM		1024
+#define TM_CONN_NUM		(BNX2X_FIRST_VF_CID + \
+				 BNX2X_VF_CIDS + \
+				 CNIC_ISCSI_CID_MAX)
 #define TM_ILT_SZ		(8 * TM_CONN_NUM)
 #define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 
@@ -1375,7 +1376,6 @@ enum {
 	BNX2X_SP_RTNL_RX_MODE,
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
-	BNX2X_SP_RTNL_TX_RESUME,
 };
 
 struct bnx2x_prev_path_list {
@@ -1527,7 +1527,6 @@ struct bnx2x {
#define PCI_32BIT_FLAG			(1 << 1)
 #define ONE_PORT_FLAG			(1 << 2)
 #define NO_WOL_FLAG			(1 << 3)
-#define USING_DAC_FLAG			(1 << 4)
 #define USING_MSIX_FLAG			(1 << 5)
 #define USING_MSI_FLAG			(1 << 6)
 #define DISABLE_MSI_FLAG		(1 << 7)
@@ -1546,6 +1545,7 @@ struct bnx2x {
 #define IS_VF_FLAG			(1 << 22)
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
+#define HAS_PHYS_PORT_ID		(1 << 25)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1621,7 +1621,7 @@ struct bnx2x {
 	u16			rx_ticks_int;
 	u16			rx_ticks;
 /* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT		(0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT		(0xff*BNX2X_BTR)
 
 	u32			lin_cnt;
 
@@ -1876,6 +1876,8 @@ struct bnx2x {
 	u32 dump_preset_idx;
 	bool					stats_started;
 	struct semaphore			stats_sema;
+
+	u8					phys_port_id[ETH_ALEN];
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2072,7 +2074,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
 			       u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+			       u32 *comp);
 
 /* FLR related routines */
 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2231,7 +2234,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define BNX2X_NUM_TESTS_SF		7
 #define BNX2X_NUM_TESTS_MF		3
 #define BNX2X_NUM_TESTS(bp)		(IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-					 BNX2X_NUM_TESTS_SF)
+					 IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK		0
 #define BNX2X_MAC_LOOPBACK		1
@@ -2491,11 +2494,9 @@ enum {
 
 #define NUM_MACS	8
 
-enum bnx2x_pci_bus_speed {
-	BNX2X_PCI_LINK_SPEED_2500 = 2500,
-	BNX2X_PCI_LINK_SPEED_5000 = 5000,
-	BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e66beff2704..ec96130533c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 	}
 #endif
+	skb_record_rx_queue(skb, fp->rx_queue);
 	napi_gro_receive(&fp->napi, skb);
 }
 
@@ -2544,10 +2545,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		}
 	}
 
-	/* Allocated memory for FW statistics  */
-	if (bnx2x_alloc_fw_stats_mem(bp))
-		LOAD_ERROR_EXIT(bp, load_error0);
-
 	/* need to be done after alloc mem, since it's self adjusting to amount
 	 * of memory available for RSS queues
 	 */
@@ -2557,6 +2554,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		LOAD_ERROR_EXIT(bp, load_error0);
 	}
 
+	/* Allocated memory for FW statistics  */
+	if (bnx2x_alloc_fw_stats_mem(bp))
+		LOAD_ERROR_EXIT(bp, load_error0);
+
 	/* request pf to initialize status blocks */
 	if (IS_VF(bp)) {
 		rc = bnx2x_vfpf_init(bp);
@@ -2811,8 +2812,8 @@ load_error1:
 	if (IS_PF(bp))
 		bnx2x_clear_pf_load(bp);
 load_error0:
-	bnx2x_free_fp_mem(bp);
 	bnx2x_free_fw_stats_mem(bp);
+	bnx2x_free_fp_mem(bp);
 	bnx2x_free_mem(bp);
 
 	return rc;
@@ -2958,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
 	bp->port.pmf = 0;
 
+	/* clear pending work in rtnl task */
+	bp->sp_rtnl_state = 0;
+	smp_mb();
+
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	if (CNIC_LOADED(bp))
@@ -3255,14 +3260,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 	if (prot == IPPROTO_TCP)
 		rc |= XMIT_CSUM_TCP;
 
-	if (skb_is_gso_v6(skb)) {
-		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-		if (rc & XMIT_CSUM_ENC)
-			rc |= XMIT_GSO_ENC_V6;
-	} else if (skb_is_gso(skb)) {
-		rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-		if (rc & XMIT_CSUM_ENC)
-			rc |= XMIT_GSO_ENC_V4;
+	if (skb_is_gso(skb)) {
+		if (skb_is_gso_v6(skb)) {
+			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V6;
+		} else {
+			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+			if (rc & XMIT_CSUM_ENC)
+				rc |= XMIT_GSO_ENC_V4;
+		}
 	}
 
 	return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d882..fdace204b05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* ets may affect cmng configuration: reinit it in hw */ bnx2x_set_local_cmng(bp); - - set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 324de5f0533..32d0f1435fb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); int regdump_len = 0; + if (IS_VF(bp)) + return 0; + regdump_len = __bnx2x_get_regs_len(bp); regdump_len *= 4; regdump_len += sizeof(struct dump_header); @@ -891,17 +894,8 @@ static void bnx2x_get_regs(struct net_device *dev, * will re-enable parity attentions right after the dump. */ - /* Disable parity on path 0 */ - bnx2x_pretend_func(bp, 0); bnx2x_disable_blocks_parity(bp); - /* Disable parity on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_disable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; dump_hdr.preset = DUMP_ALL_PRESETS; dump_hdr.version = BNX2X_DUMP_VERSION; @@ -928,18 +922,9 @@ static void bnx2x_get_regs(struct net_device *dev, /* Actually read the registers */ __bnx2x_get_regs(bp, p); - /* Re-enable parity attentions on path 0 */ - bnx2x_pretend_func(bp, 0); + /* Re-enable parity attentions */ bnx2x_clear_blocks_parity(bp); bnx2x_enable_blocks_parity(bp); - - /* Re-enable parity attentions on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_clear_blocks_parity(bp); - bnx2x_enable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); } static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) @@ -993,17 +978,8 @@ static int bnx2x_get_dump_data(struct net_device *dev, * will re-enable parity attentions right after the dump. 
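The two ethtool hunks above collapse the old per-path pretend-function dance into a single disable/re-enable bracket around the register read. A minimal sketch of the resulting flow, reusing only helper names that appear in the hunk (an illustration, not the driver's verbatim code):

	/* Bracket the snapshot with one parity disable/re-enable; no
	 * bnx2x_pretend_func() round trips per path are needed anymore.
	 */
	static void get_regs_sketch(struct bnx2x *bp, u32 *p)
	{
		bnx2x_disable_blocks_parity(bp);	/* quiesce attentions */
		__bnx2x_get_regs(bp, p);		/* snapshot the blocks */
		bnx2x_clear_blocks_parity(bp);		/* drop latched events */
		bnx2x_enable_blocks_parity(bp);		/* restore attentions */
	}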
*/ - /* Disable parity on path 0 */ - bnx2x_pretend_func(bp, 0); bnx2x_disable_blocks_parity(bp); - /* Disable parity on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_disable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; dump_hdr.preset = bp->dump_preset_idx; dump_hdr.version = BNX2X_DUMP_VERSION; @@ -1032,19 +1008,10 @@ static int bnx2x_get_dump_data(struct net_device *dev, /* Actually read the registers */ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); - /* Re-enable parity attentions on path 0 */ - bnx2x_pretend_func(bp, 0); - bnx2x_clear_blocks_parity(bp); - bnx2x_enable_blocks_parity(bp); - - /* Re-enable parity attentions on path 1 */ - bnx2x_pretend_func(bp, 1); + /* Re-enable parity attentions */ bnx2x_clear_blocks_parity(bp); bnx2x_enable_blocks_parity(bp); - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - return 0; } @@ -2900,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev, memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + if (bnx2x_test_nvram(bp) != 0) { + if (!IS_MF(bp)) + buf[4] = 1; + else + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test when interface is down\n"); + DP(BNX2X_MSG_ETHTOOL, "Interface is down\n"); return; } @@ -2964,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev, /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up, is_serdes); } - if (bnx2x_test_nvram(bp) != 0) { - if (!IS_MF(bp)) - buf[4] = 1; - else - buf[0] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + if (bnx2x_test_intr(bp) != 0) { if (!IS_MF(bp)) buf[5] = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 32767f6aa33..cf1df8b62e2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */ #define SHARED_HW_CFG_LED_MAC4 0x000c0000 #define SHARED_HW_CFG_LED_PHY8 0x000d0000 #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 76df015f486..c2dfea7968f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -640,23 +640,35 @@ static const struct { * [30] MCP Latched ump_tx_parity * [31] MCP Latched scpad_parity */ -#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) /* Below registers control the MCP parity attention output. When * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are * enabled, when cleared - disabled. 
*/ -static const u32 mcp_attn_ctl_regs[] = { - MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_0, - MISC_REG_AEU_ENABLE4_PXP_0, - MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_1, - MISC_REG_AEU_ENABLE4_PXP_1 +static const struct { + u32 addr; + u32 bits; +} mcp_attn_ctl_regs[] = { + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } }; static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) @@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) u32 reg_val; for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { - reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); if (enable) - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val |= mcp_attn_ctl_regs[i].bits; else - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val &= ~mcp_attn_ctl_regs[i].bits; - REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); + REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 51468227bf3..20dcc02431c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params) } static int bnx2x_bsc_read(struct link_params *params, - struct bnx2x_phy *phy, + struct bnx2x *bp, u8 sl_devid, u16 sl_addr, u8 lc_addr, @@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params, { u32 val, i; int rc = 0; - struct bnx2x *bp = params->bp; if (xfer_cnt > 16) { DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", @@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params, * intended override. */ break; - } else + } else { + u32 nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? + (SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - hw_led_mode); + nig_led_mode); + } REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); /* Set blinking rate to ~15.9Hz */ @@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, usleep_range(1000, 2000); bnx2x_warpcore_power_module(params, 1); } - rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt, + rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, data_array); } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 0x40); } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 
0x98 : 0x80; + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, - 0x80); + val); /* Tell LED3 to blink on source */ bnx2x_cl45_read(bp, phy, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 82b658d8c04..814d0eca9b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, } /* issue a dmae command over the init-channel and wait for completion */ -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp) { - u32 *wb_comp = bnx2x_sp(bp, wb_comp); int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; int rc = 0; @@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) spin_lock_bh(&bp->dmae_lock); /* reset completion */ - *wb_comp = 0; + *comp = 0; /* post the command on the channel used for initializations */ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); /* wait for completion */ udelay(5); - while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!cnt || (bp->recovery_state != BNX2X_RECOVERY_DONE && @@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) cnt--; udelay(50); } - if (*wb_comp & DMAE_PCI_ERR_FLAG) { + if (*comp & DMAE_PCI_ERR_FLAG) { BNX2X_ERR("DMAE PCI error!\n"); rc = DMAE_PCI_ERROR; } @@ -574,10 +574,12 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.len = len32; /* issue the command and wait for completion */ - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -611,10 +613,12 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.len = len32; /* issue the command and wait for completion */ - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -751,6 +755,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp) return rc; } +#define MCPR_TRACE_BUFFER_SIZE (0x800) +#define SCRATCH_BUFFER_SIZE(bp) \ + (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) + void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) { u32 addr, val; @@ -775,7 +783,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) trace_shmem_base = bp->common.shmem_base; else trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); - addr = trace_shmem_base - 0x800; + + /* sanity */ + if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || + trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + + SCRATCH_BUFFER_SIZE(bp)) { + BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", + trace_shmem_base); + return; + } + + addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; /* validate TRCB signature */ mark = REG_RD(bp, addr); @@ -787,14 +805,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) /* read cyclic buffer pointer */ addr += 4; mark = REG_RD(bp, addr); - mark = (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) - + ((mark + 0x3) & ~0x3) - 0x08000000; + mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; + if (mark >= trace_shmem_base || mark < addr + 4) { + BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); + return; + } printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); printk("%s", lvl); /* dump buffer after the mark */ - for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { + for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); data[8] = 0x0; @@ -4280,65 +4301,60 @@ static void _print_next_block(int idx, const char *blk) pr_cont("%s%s", idx ? ", " : "", blk); } -static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "BRB"); + res |= true; /* Each bit is real error! */ + + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + _print_next_block((*par_num)++, "BRB"); _print_parity(bp, BRB1_REG_BRB1_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + _print_next_block((*par_num)++, + "PARSER"); _print_parity(bp, PRS_REG_PRS_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + _print_next_block((*par_num)++, "TSDM"); _print_parity(bp, TSDM_REG_TSDM_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + _print_next_block((*par_num)++, "SEARCHER"); _print_parity(bp, SRC_REG_SRC_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TCM"); - _print_parity(bp, - TCM_REG_TCM_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + _print_next_block((*par_num)++, "TCM"); + _print_parity(bp, TCM_REG_TCM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "TSEMI"); _print_parity(bp, TSEM_REG_TSEM_PRTY_STS_0); _print_parity(bp, TSEM_REG_TSEM_PRTY_STS_1); - } - break; - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "XPB"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, "XPB"); _print_parity(bp, GRCBASE_XPB + PB_REG_PB_PRTY_STS); + break; } - break; } /* Clear the bit */ @@ -4346,53 +4362,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, - int par_num, bool *global, +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 
cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { + res |= true; /* Each bit is real error! */ switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "PBF"); + _print_next_block((*par_num)++, "PBF"); _print_parity(bp, PBF_REG_PBF_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "QM"); + _print_next_block((*par_num)++, "QM"); _print_parity(bp, QM_REG_QM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "TM"); + _print_next_block((*par_num)++, "TM"); _print_parity(bp, TM_REG_TM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XSDM"); + _print_next_block((*par_num)++, "XSDM"); _print_parity(bp, XSDM_REG_XSDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XCM"); + _print_next_block((*par_num)++, "XCM"); _print_parity(bp, XCM_REG_XCM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XSEMI"); + _print_next_block((*par_num)++, + "XSEMI"); _print_parity(bp, XSEM_REG_XSEM_PRTY_STS_0); _print_parity(bp, @@ -4401,7 +4423,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) { - _print_next_block(par_num++, + _print_next_block((*par_num)++, "DOORBELLQ"); _print_parity(bp, DORQ_REG_DORQ_PRTY_STS); @@ -4409,7 +4431,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "NIG"); + _print_next_block((*par_num)++, "NIG"); if (CHIP_IS_E1x(bp)) { _print_parity(bp, NIG_REG_NIG_PRTY_STS); @@ -4423,32 +4445,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "VAUX PCI CORE"); *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "DEBUG"); + _print_next_block((*par_num)++, + "DEBUG"); _print_parity(bp, DBG_REG_DBG_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "USDM"); + _print_next_block((*par_num)++, "USDM"); _print_parity(bp, USDM_REG_USDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "UCM"); + _print_next_block((*par_num)++, "UCM"); _print_parity(bp, UCM_REG_UCM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "USEMI"); + _print_next_block((*par_num)++, + "USEMI"); _print_parity(bp, USEM_REG_USEM_PRTY_STS_0); _print_parity(bp, @@ -4457,21 +4481,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "UPB"); + _print_next_block((*par_num)++, "UPB"); _print_parity(bp, GRCBASE_UPB + PB_REG_PB_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "CSDM"); + _print_next_block((*par_num)++, "CSDM"); _print_parity(bp, CSDM_REG_CSDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) { 
- _print_next_block(par_num++, "CCM"); + _print_next_block((*par_num)++, "CCM"); _print_parity(bp, CCM_REG_CCM_PRTY_STS); } break; @@ -4482,80 +4506,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CSEMI"); + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "CSEMI"); _print_parity(bp, CSEM_REG_CSEM_PRTY_STS_0); _print_parity(bp, CSEM_REG_CSEM_PRTY_STS_1); - } - break; - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PXP"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + _print_next_block((*par_num)++, "PXP"); _print_parity(bp, PXP_REG_PXP_PRTY_STS); _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_0); _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_1); - } - break; - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "PXPPCICLOCKCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CFC"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + _print_next_block((*par_num)++, "CFC"); _print_parity(bp, CFC_REG_CFC_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + _print_next_block((*par_num)++, "CDU"); _print_parity(bp, CDU_REG_CDU_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + _print_next_block((*par_num)++, "DMAE"); _print_parity(bp, DMAE_REG_DMAE_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + _print_next_block((*par_num)++, "IGU"); if (CHIP_IS_E1x(bp)) _print_parity(bp, HC_REG_HC_PRTY_STS); else _print_parity(bp, IGU_REG_IGU_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "MISC"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + _print_next_block((*par_num)++, "MISC"); _print_parity(bp, MISC_REG_MISC_PRTY_STS); + break; } - break; } /* Clear the bit */ @@ -4563,40 +4580,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, - bool *global, bool print) +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) { - int i = 0; - u32 cur_bit = 0; + bool res = false; + u32 cur_bit; + int i; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { switch (cur_bit) { 
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) - _print_next_block(par_num++, "MCP ROM"); + _print_next_block((*par_num)++, + "MCP ROM"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP SCPAD"); - *global = true; + /* clear latched SCPAD PARITY from MCP */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, + 1UL << 10); break; } @@ -4605,45 +4631,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PGLUE_B"); + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + _print_next_block((*par_num)++, + "PGLUE_B"); _print_parity(bp, - PGLUE_B_REG_PGLUE_B_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "ATC"); + PGLUE_B_REG_PGLUE_B_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + _print_next_block((*par_num)++, "ATC"); _print_parity(bp, ATC_REG_ATC_PRTY_STS); + break; } - break; } - /* Clear the bit */ sig &= ~cur_bit; } } - return par_num; + return res; } static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, u32 *sig) { + bool res = false; + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || @@ -4660,23 +4691,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0(bp, - sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1(bp, - sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2(bp, - sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); - par_num = bnx2x_check_blocks_with_parity3( - sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4(bp, - sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + res |= bnx2x_check_blocks_with_parity0(bp, + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); + res |= bnx2x_check_blocks_with_parity1(bp, + sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity2(bp, + sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); + res |= bnx2x_check_blocks_with_parity3(bp, + sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity4(bp, + sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); if (print) pr_cont("\n"); + } - return true; - } else - return false; + return res; } /** @@
-5205,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) case EVENT_RING_OPCODE_STOP_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -7126,7 +7156,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) int port = BP_PORT(bp); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; u32 low, high; - u32 val; + u32 val, reg; DP(NETIF_MSG_HW, "starting port init port %d\n", port); @@ -7271,6 +7301,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) val |= CHIP_IS_E1(bp) ? 0 : 0x10; REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + /* SCPAD_PARITY should NOT trigger close the gates */ + reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + + reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(bp)) { @@ -9315,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9669,11 +9714,10 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_pf_set_vfs_vlan(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_dcbx_stop_hw_tx(bp); - - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) bnx2x_dcbx_resume_hw_tx(bp); + } /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) @@ -9879,7 +9923,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; - int rc = false; + bool rc = false; if (down_trylock(&bnx2x_prev_sem)) return false; @@ -11149,6 +11193,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_get_cnic_mac_hwinfo(bp); } + if (!BP_NOMCP(bp)) { + /* Read physical port identifier from shmem */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->phys_port_id, val, val2); + bp->flags |= HAS_PHYS_PORT_ID; + } + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) @@ -11685,9 +11737,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) static int bnx2x_open(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - bool global = false; - int other_engine = BP_PATH(bp) ? 0 : 1; - bool other_load_status, load_status; int rc; bp->stats_init = true; @@ -11703,6 +11752,10 @@ static int bnx2x_open(struct net_device *dev) * Parity recovery is only relevant for PF driver. 
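The bnx2x_get_mac_hwinfo() hunk above derives bp->phys_port_id from the shmem words mac_upper and mac_lower. A worked example of that packing as a runnable user-space stand-in; the most-significant-byte-first layout is assumed from bnx2x_set_mac_buf():

	#include <stdio.h>
	#include <stdint.h>

	/* Pack a 16-bit upper word and a 32-bit lower word into a
	 * six-byte port identifier, most significant byte first.
	 */
	static void set_mac_buf(uint8_t *buf, uint32_t lo, uint16_t hi)
	{
		buf[0] = hi >> 8;
		buf[1] = hi & 0xff;
		buf[2] = lo >> 24;
		buf[3] = (lo >> 16) & 0xff;
		buf[4] = (lo >> 8) & 0xff;
		buf[5] = lo & 0xff;
	}

	int main(void)
	{
		uint8_t id[6];

		set_mac_buf(id, 0x33445566, 0x1122);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       id[0], id[1], id[2], id[3], id[4], id[5]);
		return 0;	/* prints 11:22:33:44:55:66 */
	}

The new ndo_get_phys_port_id callback then simply copies these six bytes out, guarded by the HAS_PHYS_PORT_ID flag.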
*/ if (IS_PF(bp)) { + int other_engine = BP_PATH(bp) ? 0 : 1; + bool other_load_status, load_status; + bool global = false; + other_load_status = bnx2x_get_load_status(bp, other_engine); load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || @@ -11746,7 +11799,7 @@ static int bnx2x_open(struct net_device *dev) rc = bnx2x_nic_load(bp, LOAD_OPEN); if (rc) return rc; - return bnx2x_open_epilog(bp); + return 0; } /* called with rtnl_lock */ @@ -12044,6 +12097,20 @@ static int bnx2x_validate_addr(struct net_device *dev) return 0; } +static int bnx2x_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_port_id *ppid) +{ + struct bnx2x *bp = netdev_priv(netdev); + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(bp->phys_port_id); + memcpy(ppid->id, bp->phys_port_id, ppid->id_len); + + return 0; +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -12073,19 +12140,15 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = bnx2x_low_latency_recv, #endif + .ndo_get_phys_port_id = bnx2x_get_phys_port_id, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) { struct device *dev = &bp->pdev->dev; - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { - bp->flags |= USING_DAC_FLAG; - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); - return -EIO; - } - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { dev_err(dev, "System does not support DMA, aborting\n"); return -EIO; } @@ -12237,10 +12300,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; if (!CHIP_IS_E1x(bp)) { - dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; + dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; } @@ -12248,8 +12314,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; - if (bp->flags & USING_DAC_FLAG) - dev->features |= NETIF_F_HIGHDMA; + dev->features |= NETIF_F_HIGHDMA; /* Add Loopback capability to the device */ dev->hw_features |= NETIF_F_LOOPBACK; @@ -12274,34 +12339,11 @@ err_out_release: err_out_disable: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return rc; } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, - enum bnx2x_pci_bus_speed *speed) -{ - u32 link_speed, val = 0; - - pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); - *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - - link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; - - switch (link_speed) { - case 3: - *speed = BNX2X_PCI_LINK_SPEED_8000; - break; - case 2: - *speed = BNX2X_PCI_LINK_SPEED_5000; - break; - default: - *speed = BNX2X_PCI_LINK_SPEED_2500; - } -} - static int bnx2x_check_firmware(struct bnx2x *bp) { const struct firmware *firmware = bp->firmware; @@ -12612,24 +12654,24 
@@ static int set_max_cos_est(int chip_id) return BNX2X_MULTI_TX_COS_E1X; case BCM57712: case BCM57712_MF: - case BCM57712_VF: return BNX2X_MULTI_TX_COS_E2_E3A0; case BCM57800: case BCM57800_MF: - case BCM57800_VF: case BCM57810: case BCM57810_MF: case BCM57840_4_10: case BCM57840_2_20: case BCM57840_O: case BCM57840_MFO: - case BCM57810_VF: case BCM57840_MF: - case BCM57840_VF: case BCM57811: case BCM57811_MF: - case BCM57811_VF: return BNX2X_MULTI_TX_COS_E3B0; + case BCM57712_VF: + case BCM57800_VF: + case BCM57810_VF: + case BCM57840_VF: + case BCM57811_VF: return 1; default: pr_err("Unknown board_type (%d), aborting\n", chip_id); @@ -12658,8 +12700,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct bnx2x *bp; - int pcie_width; - enum bnx2x_pci_bus_speed pcie_speed; + enum pcie_link_width pcie_width; + enum pci_bus_speed pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count, doorbell_size; int max_cos_est; @@ -12808,18 +12850,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } - - bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); - BNX2X_DEV_INFO("got pcie width %d and speed %d\n", - pcie_width, pcie_speed); - - BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || + pcie_speed == PCI_SPEED_UNKNOWN || + pcie_width == PCIE_LNK_WIDTH_UNKNOWN) + BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); + else + BNX2X_DEV_INFO( + "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", board_info[ent->driver_data].name, (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), pcie_width, - pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : - pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : - pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : + pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : + pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : + pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" : "Unknown", dev->base_addr, bp->pdev->irq, dev->dev_addr); @@ -12838,7 +12881,6 @@ init_one_exit: pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return rc; } @@ -12921,7 +12963,6 @@ static void __bnx2x_remove(struct pci_dev *pdev, pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void bnx2x_remove_one(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4c..3efbb35267c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. 
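For context on the register documented above: it is write-one-to-clear, so re-arming every latched-error details register takes a single write of bits 0-6. That matches the REG_WR of 0x7f added to the process-kill path earlier in this patch; a hedged wrapper for the same idea:

	/* Re-arm all seven latched-error details registers (bits 0-6). */
	static void pglueb_clear_latched_errors(struct bnx2x *bp)
	{
		REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
	}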
*/ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 9fbeee522d2..32c92abf509 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1217,9 +1217,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, ETH_VLAN_FILTER_CLASSIFY, config); } -#define list_next_entry(pos, member) \ - list_entry((pos)->member.next, typeof(*(pos)), member) - /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9ad012bdd91..2e46c28fc60 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, bnx2x_vfop_qdtor, cmd->done); return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, cmd->block); + } else { + BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); + return -ENOMEM; } - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", - vf->abs_vfid, vfop->rc); - return -ENOMEM; } static void @@ -2018,6 +2018,8 @@ failed: void bnx2x_iov_remove_one(struct bnx2x *bp) { + int vf_idx; + /* if SRIOV is not enabled there's nothing to do */ if (!IS_SRIOV(bp)) return; @@ -2026,6 +2028,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp) pci_disable_sriov(bp->pdev); DP(BNX2X_MSG_IOV, "sriov disabled\n"); + /* disable access to all VFs */ + for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { + bnx2x_pretend_func(bp, + HW_VF_HANDLE(bp, + bp->vfdb->sriov.first_vf_in_pf + + vf_idx)); + DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", + bp->vfdb->sriov.first_vf_in_pf + vf_idx); + bnx2x_vf_enable_internal(bp, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + /* free vf database */ __bnx2x_iov_free_vfdb(bp); } @@ -2802,7 +2816,7 @@ struct set_vf_state_cookie { u8 state; }; -void bnx2x_set_vf_state(void *cookie) +static void bnx2x_set_vf_state(void *cookie) { struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; @@ -3100,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + if (!IS_SRIOV(bp)) { + BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", num_vfs_param, BNX2X_NR_VIRTFN(bp)); @@ -3197,7 +3216,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp) * the "acquire" messages to appear on the VF PF channel. 
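The bnx2x_sp.c hunk above can drop its private list_next_entry copy because an identical macro now lives in <linux/list.h>. For reference, its semantics in a small hedged example (the struct and field names here are hypothetical):

	/* list_next_entry(pos, member) expands to
	 *	list_entry((pos)->member.next, typeof(*(pos)), member)
	 * i.e. the struct containing the following list node.
	 */
	struct mac_elem {
		struct list_head link;
		u8 mac[ETH_ALEN];
	};

	static struct mac_elem *next_mac_elem(struct mac_elem *pos)
	{
		/* caller must stop before walking past the list head */
		return list_next_entry(pos, link);
	}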
*/ DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); - pci_disable_sriov(bp->pdev); + bnx2x_disable_sriov(bp); rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3225,8 +3244,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) pci_disable_sriov(bp->pdev); } -int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, - struct pf_vf_bulletin_content **bulletin) +static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) { if (bp->state != BNX2X_STATE_OPEN) { BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3390,14 +3410,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); if (rc) { BNX2X_ERR("failed to delete eth macs\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* remove existing uc list macs */ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); if (rc) { BNX2X_ERR("failed to delete uc_list macs\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* configure the new mac to device */ @@ -3405,6 +3427,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, BNX2X_ETH_MAC, &ramrod_flags); +out: bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } @@ -3467,7 +3490,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) &ramrod_flags); if (rc) { BNX2X_ERR("failed to delete vlans\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* send queue update ramrod to configure default vlan and silent @@ -3501,7 +3525,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) rc = bnx2x_config_vlan_mac(bp, &ramrod_param); if (rc) { BNX2X_ERR("failed to configure vlan\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* configure default vlan to vf queue and set silent @@ -3519,18 +3544,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) rc = bnx2x_queue_state_change(bp, &q_params); if (rc) { BNX2X_ERR("Failed to configure default VLAN\n"); - return rc; + goto out; } /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured th Vlan before vf was - * and we were called because the VF came up later + * (will only be set if the HV configured the Vlan before vf was + * up and we were called because the VF came up later */ +out: vf->cfg_flags &= ~VF_CFG_VLAN; - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); } - return 0; + return rc; } /* crc is the first field in the bulletin board. Compute the crc over the @@ -3637,29 +3662,6 @@ alloc_mem_err: return -ENOMEM; } -int bnx2x_open_epilog(struct bnx2x *bp) -{ - /* Enable sriov via delayed work. This must be done via delayed work - * because it causes the probe of the vf devices to be run, which invoke - * register_netdevice which must have rtnl lock taken. As we are holding - * the lock right now, that could only work if the probe would not take - * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm fails) it can't assume - * the lock is being held for it. Using delayed work here allows the - * probe code to simply take the lock (i.e. wait for it to be released - * if it is being held). We only want to do this if the number of VFs - * was set before PF driver was loaded. 
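The set_vf_mac/set_vf_vlan fixes above matter because every early return used to leak the VF-PF channel lock. Reduced to a skeleton (the do_step_*() calls are hypothetical placeholders):

	static int vf_ndo_skeleton(struct bnx2x *bp, struct bnx2x_virtf *vf)
	{
		int rc;

		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		rc = do_step_one(bp, vf);	/* hypothetical */
		if (rc) {
			rc = -EINVAL;
			goto out;	/* never return with the channel held */
		}

		rc = do_step_two(bp, vf);	/* hypothetical */
	out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
		return rc;
	}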
- */ - if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) { - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } - - return 0; -} - void bnx2x_iov_channel_down(struct bnx2x *bp) { int vf_idx; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 059f0d460af..1ff6a936662 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp) void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); -int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} -static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 86436c77af0..3b75070411a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index da16953eb2e..efa8a151d78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv) mutex_unlock(&bp->vf2pf_mutex); } +/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */ +static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, + enum channel_tlvs req_tlv) +{ + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + do { + if (tlv->type == req_tlv) + return tlv; + + if (!tlv->length) { + BNX2X_ERR("Found TLV with length 0\n"); + return NULL; + } + + tlvs_list += tlv->length; + tlv = (struct channel_tlv *)tlvs_list; + } while (tlv->type != CHANNEL_TLV_LIST_END); + + DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv); + + return NULL; +} + /* list the types and lengths of the tlvs on the buffer */ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) { @@ -128,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { DP(BNX2X_MSG_IOV, "detecting channel down. 
Aborting message\n"); *done = PFVF_STATUS_SUCCESS; - return 0; + return -EINVAL; } /* Write message address */ @@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) int rc = 0, attempts = 0; struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; + struct vfpf_port_phys_id_resp_tlv *phys_port_resp; u32 vf_id; bool resources_acquired = false; @@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* pf 2 vf bulletin board address */ req->bulletin_addr = bp->pf2vf_bulletin_mapping; + /* Request physical port identifier */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, + CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); + /* add list termination tlv */ - bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + bnx2x_add_tlv(bp, req, + req->first_tlv.tl.length + sizeof(struct channel_tlv), + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* output tlvs list */ @@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) } } + /* Retrieve physical port id (if possible) */ + phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *) + bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_PHYS_PORT_ID); + if (phys_port_resp) { + memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); + bp->flags |= HAS_PHYS_PORT_ID; + } + /* get HW info */ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); bp->link_params.chip_id = bp->common.chip_id; @@ -980,56 +1020,62 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, dmae.len = len32; /* issue the command and wait for completion */ - return bnx2x_issue_dmae_with_comp(bp, &dmae); + return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); } -static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, + struct bnx2x_virtf *vf) { struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); - u64 vf_addr; - dma_addr_t pf_addr; u16 length, type; - int rc; - struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; /* prepare response */ type = mbx->first_tlv.tl.type; length = type == CHANNEL_TLV_ACQUIRE ? sizeof(struct pfvf_acquire_resp_tlv) : sizeof(struct pfvf_general_resp_tlv); - bnx2x_add_tlv(bp, resp, 0, type, length); - resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); - bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END, + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); +} + +static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; + dma_addr_t pf_addr; + u64 vf_addr; + int rc; + bnx2x_dp_tlv_list(bp, resp); DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + /* send response */ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + mbx->first_tlv.resp_msg_offset; pf_addr = mbx->msg_mapping + offsetof(struct bnx2x_vf_mbx_msg, resp); - /* copy the response body, if there is one, before the header, as the vf - * is sensitive to the header being written + /* Copy the response buffer. 
The first u64 is written afterwards, as + * the vf is sensitive to the header being written */ - if (resp->hdr.tl.length > sizeof(u64)) { - length = resp->hdr.tl.length - sizeof(u64); - vf_addr += sizeof(u64); - pf_addr += sizeof(u64); - rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, - U64_HI(vf_addr), - U64_LO(vf_addr), - length/4); - if (rc) { - BNX2X_ERR("Failed to copy response body to VF %d\n", - vf->abs_vfid); - goto mbx_error; - } - vf_addr -= sizeof(u64); - pf_addr -= sizeof(u64); + vf_addr += sizeof(u64); + pf_addr += sizeof(u64); + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + (sizeof(union pfvf_tlvs) - sizeof(u64))/4); + if (rc) { + BNX2X_ERR("Failed to copy response body to VF %d\n", + vf->abs_vfid); + goto mbx_error; } + vf_addr -= sizeof(u64); + pf_addr -= sizeof(u64); /* ack the FW */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); @@ -1060,6 +1106,36 @@ mbx_error: bnx2x_vf_release(bp, vf, false); /* non blocking */ } +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + bnx2x_vf_mbx_resp_single_tlv(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf); +} + +static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_port_phys_id_resp_tlv *port_id; + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, + sizeof(struct vfpf_port_phys_id_resp_tlv)); + + port_id = (struct vfpf_port_phys_id_resp_tlv *) + (((u8 *)buffer) + *offset); + memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); +} + static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx, int vfop_status) { @@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp; struct pf_vf_resc *resc = &resp->resc; u8 status = bnx2x_pfvf_status_codes(vfop_status); + u16 length; memset(resp, 0, sizeof(*resp)); @@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, resc->hw_sbs[i].sb_qid); DP_CONT(BNX2X_MSG_IOV, "]\n"); + /* prepare response */ + length = sizeof(struct pfvf_acquire_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); + + /* Handle possible VF requests for physical port identifiers. 
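bnx2x_search_tlv_list() above rejects a zero-length TLV precisely because the walk advances by tlv->length; without that guard a malformed buffer would spin forever. A user-space model of the same walk (the two-field header mirrors struct channel_tlv, and 0 serves as this model's list terminator):

	#include <stddef.h>
	#include <stdint.h>

	struct tlv_hdr { uint16_t type; uint16_t length; };
	#define TLV_LIST_END 0	/* sentinel for this model only */

	static void *search_tlv(void *list, uint16_t want)
	{
		struct tlv_hdr *tlv = list;

		while (tlv->type != TLV_LIST_END) {
			if (tlv->type == want)
				return tlv;
			if (!tlv->length)	/* malformed: bail, don't loop */
				return NULL;
			tlv = (struct tlv_hdr *)((char *)tlv + tlv->length);
		}
		return NULL;
	}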
+ * 'length' should continue to indicate the offset of the first empty + * place in the buffer (i.e., where next TLV should be inserted) + */ + if (bnx2x_search_tlv_list(bp, &mbx->msg->req, + CHANNEL_TLV_PHYS_PORT_ID)) + bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + /* send the response */ vf->op_rc = vfop_status; - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf); } static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) /* process the VF message header */ mbx->first_tlv = mbx->msg->req.first_tlv; + /* Clean response buffer to refrain from falsely seeing chains */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + /* dispatch the request (will prepare the response) */ bnx2x_vf_mbx_request(bp, vf, mbx); goto mbx_done; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 1179fe06d0c..208568bc7a7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv { } resc; }; +struct vfpf_port_phys_id_resp_tlv { + struct channel_tlv tl; + u8 id[ETH_ALEN]; + u8 padding[2]; +}; + #define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues * stats will be coalesced on * the leading RSS queue @@ -398,6 +404,7 @@ enum channel_tlvs { CHANNEL_TLV_PF_SET_MAC, CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_UPDATE_RSS, + CHANNEL_TLV_PHYS_PORT_ID, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 99394bd49a1..f58a8b80302 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, csk->vlan_id = path_resp->vlan_id; - memcpy(csk->ha, path_resp->mac_addr, 6); + memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN); if (test_bit(SK_F_IPV6, &csk->flags)) memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, sizeof(struct in6_addr)); @@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; - memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); + memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN); cp->cnic_ops = &cnic_bnx2x_ops; cp->start_hw = cnic_start_bnx2x_hw; diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 0658b43e148..ebbfe25acaa 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -353,8 +353,8 @@ struct cnic_ulp_ops { atomic_t ref_count; }; -extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); -extern int cnic_unregister_driver(int ulp_type); +int cnic_unregister_driver(int ulp_type); #endif diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 12d961c4ebc..f3dd93b4aea 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 133 +#define TG3_MIN_NUM 
134 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "Jul 29, 2013" +#define DRV_MODULE_RELDATE "Sep 16, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) return err; } +static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) +{ + return tg3_writephy(tp, MII_TG3_MISC_SHDW, + reg | val | MII_TG3_MISC_SHDW_WREN); +} + static int tg3_bmcr_reset(struct tg3 *tp) { u32 phy_control; @@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) spin_lock_bh(&tp->lock); - if (tg3_readphy(tp, reg, &val)) + if (__tg3_readphy(tp, mii_id, reg, &val)) val = -EIO; spin_unlock_bh(&tp->lock); @@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) spin_lock_bh(&tp->lock); - if (tg3_writephy(tp, reg, val)) + if (__tg3_writephy(tp, mii_id, reg, val)) ret = -EIO; spin_unlock_bh(&tp->lock); @@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) u32 val; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: @@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp) TG3_CPMU_PHY_STRAP_IS_SERDES; if (is_serdes) tp->phy_addr += 7; + } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) { + int addr; + + addr = ssb_gige_get_phyaddr(tp->pdev); + if (addr < 0) + return addr; + tp->phy_addr = addr; } else tp->phy_addr = TG3_PHY_MII_ADDR; @@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->reset = &tg3_mdio_reset; - tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); tp->mdio_bus->irq = &tp->mdio_irq[0]; for (i = 0; i < PHY_MAX_ADDR; i++) @@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp) return i; } - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; else autoneg = tp->link_config.autoneg; @@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev) u8 oldflowctrl, linkmesg = 0; u32 mac_mode, lcl_adv, rmt_adv; 
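The tg3_mdio_read/write fix above is subtle: the mii_bus callbacks receive the PHY address to target (mii_id) and must pass it down through __tg3_readphy/__tg3_writephy, rather than letting the single-address wrappers default to tp->phy_addr, which goes wrong once the roboswitch path assigns a non-default address. Schematically (a sketch, not the driver's exact body):

	static int mdio_read_sketch(struct mii_bus *bus, int mii_id, int reg)
	{
		struct tg3 *tp = bus->priv;
		u32 val;
		int ret;

		spin_lock_bh(&tp->lock);
		if (__tg3_readphy(tp, mii_id, reg, &val))	/* honor mii_id */
			ret = -EIO;
		else
			ret = val;
		spin_unlock_bh(&tp->lock);

		return ret;
	}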
struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; spin_lock_bh(&tp->lock); @@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp) /* Bring the PHY back to a known state. */ tg3_bmcr_reset(tp); - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; /* Attach the MAC to the PHY. */ phydev = phy_connect(tp->dev, dev_name(&phydev->dev), @@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp) SUPPORTED_Asym_Pause); break; default: - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); return -EINVAL; } @@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); } static void tg3_phy_fini(struct tg3 *tp) { if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } @@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) return; } - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_SCR5_SEL | - MII_TG3_MISC_SHDW_SCR5_LPED | + reg = MII_TG3_MISC_SHDW_SCR5_LPED | MII_TG3_MISC_SHDW_SCR5_DLPTLM | MII_TG3_MISC_SHDW_SCR5_SDTL | MII_TG3_MISC_SHDW_SCR5_C125OE; if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_APD_SEL | - MII_TG3_MISC_SHDW_APD_WKTM_84MS; + reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; if (enable) reg |= MII_TG3_MISC_SHDW_APD_ENABLE; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); } static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) @@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) struct phy_device *phydev; u32 phyid, advertising; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -6848,12 +6862,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data, frag_size); - if (!skb) { - tg3_frag_free(frag_size != 0, data); - goto drop_it_no_recycle; - } - skb_reserve(skb, TG3_RX_OFFSET(tp)); /* Ensure that the update to the data happens * after the usage of the old DMA mapping. 
*/ @@ -6861,6 +6869,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) ri->data = NULL; + skb = build_skb(data, frag_size); + if (!skb) { + tg3_frag_free(frag_size != 0, data); + goto drop_it_no_recycle; + } + skb_reserve(skb, TG3_RX_OFFSET(tp)); } else { tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); @@ -8918,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp) void (*write_op)(struct tg3 *, u32, u32); int i, err; + if (!pci_device_is_present(tp->pdev)) + return -ENODEV; + tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -9196,10 +9213,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent) memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); } - if (err) - return err; - - return 0; + return err; } static int tg3_set_mac_addr(struct net_device *dev, void *p) @@ -10618,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) static ssize_t tg3_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(netdev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct tg3 *tp = dev_get_drvdata(dev); u32 temperature; spin_lock_bh(&tp->lock); @@ -10639,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, TG3_TEMP_MAX_OFFSET); -static struct attribute *tg3_attributes[] = { +static struct attribute *tg3_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, NULL }; - -static const struct attribute_group tg3_group = { - .attrs = tg3_attributes, -}; +ATTRIBUTE_GROUPS(tg3); static void tg3_hwmon_close(struct tg3 *tp) { if (tp->hwmon_dev) { hwmon_device_unregister(tp->hwmon_dev); tp->hwmon_dev = NULL; - sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); } } static void tg3_hwmon_open(struct tg3 *tp) { - int i, err; + int i; u32 size = 0; struct pci_dev *pdev = tp->pdev; struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; @@ -10679,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp) if (!size) return; - /* Register hwmon sysfs hooks */ - err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); - if (err) { - dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); - return; - } - - tp->hwmon_dev = hwmon_device_register(&pdev->dev); + tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", + tp, tg3_groups); if (IS_ERR(tp->hwmon_dev)) { tp->hwmon_dev = NULL; dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); - sysfs_remove_group(&pdev->dev.kobj, &tg3_group); } } @@ -11035,7 +11036,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num) name = tp->dev->name; else { name = &tnapi->irq_lbl[0]; - snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + if (tnapi->tx_buffers && tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-txrx-%d", tp->dev->name, irq_num); + else if (tnapi->tx_buffers) + snprintf(name, IFNAMSIZ, + "%s-tx-%d", tp->dev->name, irq_num); + else if (tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-rx-%d", tp->dev->name, irq_num); + else + snprintf(name, IFNAMSIZ, + "%s-%d", tp->dev->name, irq_num); name[IFNAMSIZ-1] = 0; } @@ -11572,10 +11584,11 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down_prepare(tp); - - 
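The hwmon conversion above leans on ATTRIBUTE_GROUPS(tg3), which is also why the attribute array was renamed to tg3_attrs: the macro derives both the group and the group list from the name##_attrs convention, roughly as sketched below, and hwmon_device_register_with_groups() then creates the sysfs files itself (making the explicit sysfs_create_group()/sysfs_remove_group() calls unnecessary) while stashing tp so tg3_show_temp() can retrieve it with dev_get_drvdata(). Approximate expansion, for illustration:

/* roughly what ATTRIBUTE_GROUPS(tg3) expands to (see <linux/sysfs.h>) */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attrs,
};
static const struct attribute_group *tg3_groups[] = {
	&tg3_group,
	NULL,
};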
tg3_carrier_off(tp); + if (pci_device_is_present(tp->pdev)) { + tg3_power_down_prepare(tp); + tg3_carrier_off(tp); + } return 0; } @@ -11907,7 +11920,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_ethtool_gset(phydev, cmd); } @@ -11974,7 +11987,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_ethtool_sset(phydev, cmd); } @@ -12093,12 +12106,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); - spin_lock_bh(&tp->lock); if (device_may_wakeup(dp)) tg3_flag_set(tp, WOL_ENABLE); else tg3_flag_clear(tp, WOL_ENABLE); - spin_unlock_bh(&tp->lock); return 0; } @@ -12131,7 +12142,7 @@ static int tg3_nway_reset(struct net_device *dev) if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); } else { u32 bmcr; @@ -12247,7 +12258,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam u32 newadv; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -13194,8 +13205,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) return -ENOMEM; tx_data = skb_put(skb, tx_len); - memcpy(tx_data, tp->dev->dev_addr, 6); - memset(tx_data + 6, 0x0, 8); + memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); + memset(tx_data + ETH_ALEN, 0x0, 8); tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); @@ -13598,16 +13609,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, if (stmpconf.flags) return -EINVAL; - switch (stmpconf.tx_type) { - case HWTSTAMP_TX_ON: - tg3_flag_set(tp, TX_TSTAMP_EN); - break; - case HWTSTAMP_TX_OFF: - tg3_flag_clear(tp, TX_TSTAMP_EN); - break; - default: + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; - } switch (stmpconf.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -13669,6 +13673,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, tw32(TG3_RX_PTP_CTL, tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
-EFAULT : 0; } @@ -13683,7 +13692,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_mii_ioctl(phydev, ifr, cmd); } @@ -14921,6 +14930,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); + + if (tg3_flag(tp, 5717_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | + LED_CTRL_BLINK_RATE_MASK; + break; case SHASTA_EXT_LED_MAC: @@ -15759,9 +15774,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) reg = TG3PCI_GEN2_PRODID_ASICREV; else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || @@ -16485,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Clear this out for sanity. */ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ + tw32(TG3PCI_REG_BASE_ADDR, 0); + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && @@ -16632,8 +16653,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp) int len; addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == 6) { - memcpy(dev->dev_addr, addr, 6); + if (addr && len == ETH_ALEN) { + memcpy(dev->dev_addr, addr, ETH_ALEN); return 0; } return -ENODEV; @@ -16643,7 +16664,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp) { struct net_device *dev = tp->dev; - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); return 0; } #endif @@ -17052,10 +17073,6 @@ static int tg3_test_dma(struct tg3 *tp) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); -#if 0 - /* Unneeded, already done by tg3_get_invariants. */ - tg3_switch_clocks(tp); -#endif if (tg3_asic_rev(tp) != ASIC_REV_5700 && tg3_asic_rev(tp) != ASIC_REV_5701) @@ -17083,20 +17100,6 @@ static int tg3_test_dma(struct tg3 *tp) break; } -#if 0 - /* validate data reached card RAM correctly. */ - for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { - u32 val; - tg3_read_mem(tp, 0x2100 + (i*4), &val); - if (le32_to_cpu(val) != p[i]) { - dev_err(&tp->pdev->dev, - "%s: Buffer corrupted on device! " - "(%d != %d)\n", __func__, val, i); - /* ret = -ENODEV here? */ - } - p[i] = 0; - } -#endif /* Now read it back. 
*/ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); if (ret) { @@ -17362,8 +17365,10 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_flag_set(tp, FLUSH_POSTED_WRITES); if (ssb_gige_one_dma_at_once(pdev)) tg3_flag_set(tp, ONE_DMA_AT_ONCE); - if (ssb_gige_have_roboswitch(pdev)) + if (ssb_gige_have_roboswitch(pdev)) { + tg3_flag_set(tp, USE_PHYLIB); tg3_flag_set(tp, ROBOSWITCH); + } if (ssb_gige_is_rgmii(pdev)) tg3_flag_set(tp, RGMII_MODE); } @@ -17409,9 +17414,12 @@ static int tg3_init_one(struct pci_dev *pdev, tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) { + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { tg3_flag_set(tp, ENABLE_APE); tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { @@ -17628,7 +17636,7 @@ static int tg3_init_one(struct pci_dev *pdev, if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); @@ -17685,7 +17693,6 @@ err_out_free_res: err_out_disable_pdev: if (pci_is_enabled(pdev)) pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return err; } @@ -17717,7 +17724,6 @@ static void tg3_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } } @@ -17727,10 +17733,12 @@ static int tg3_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; tg3_reset_task_cancel(tp); tg3_phy_stop(tp); @@ -17772,6 +17780,8 @@ out: tg3_phy_start(tp); } +unlock: + rtnl_unlock(); return err; } @@ -17780,10 +17790,12 @@ static int tg3_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; netif_device_attach(dev); @@ -17807,6 +17819,8 @@ out: if (!err) tg3_phy_start(tp); +unlock: + rtnl_unlock(); return err; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 70257808aa3..5c3835aa1e1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -68,6 +68,9 @@ #define TG3PCI_DEVICE_TIGON3_5762 0x1687 #define TG3PCI_DEVICE_TIGON3_5725 0x1643 #define TG3PCI_DEVICE_TIGON3_5727 0x16f3 +#define TG3PCI_DEVICE_TIGON3_57764 0x1642 +#define TG3PCI_DEVICE_TIGON3_57767 0x1683 +#define TG3PCI_DEVICE_TIGON3_57787 0x1641 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index b78e69e0e52..248bc37cb41 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ 
-3212,7 +3212,6 @@ bnad_init(struct bnad *bnad, bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); if (!bnad->bar0) { dev_err(&pdev->dev, "ioremap for bar0 failed\n"); - pci_set_drvdata(pdev, NULL); return -ENOMEM; } pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, @@ -3300,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad, err = pci_request_regions(pdev, BNAD_NAME); if (err) goto disable_device; - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { *using_dac = true; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) - goto release_regions; - } + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto release_regions; *using_dac = false; } pci_set_master(pdev); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index aefee77523f..f7e033f8a00 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -372,38 +372,37 @@ extern u32 bnad_rxqs_per_cq; /* * EXTERN PROTOTYPES */ -extern u32 *cna_get_firmware_buf(struct pci_dev *pdev); +u32 *cna_get_firmware_buf(struct pci_dev *pdev); /* Netdev entry point prototypes */ -extern void bnad_set_rx_mode(struct net_device *netdev); -extern struct net_device_stats *bnad_get_netdev_stats( - struct net_device *netdev); -extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); -extern int bnad_enable_default_bcast(struct bnad *bnad); -extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); -extern void bnad_set_ethtool_ops(struct net_device *netdev); -extern void bnad_cb_completion(void *arg, enum bfa_status status); +void bnad_set_rx_mode(struct net_device *netdev); +struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev); +int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); +int bnad_enable_default_bcast(struct bnad *bnad); +void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); +void bnad_set_ethtool_ops(struct net_device *netdev); +void bnad_cb_completion(void *arg, enum bfa_status status); /* Configuration & setup */ -extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad); -extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad); +void bnad_tx_coalescing_timeo_set(struct bnad *bnad); +void bnad_rx_coalescing_timeo_set(struct bnad *bnad); -extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); -extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); -extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); /* Timer start/stop protos */ -extern void bnad_dim_timer_start(struct bnad *bnad); +void bnad_dim_timer_start(struct bnad *bnad); /* Statistics */ -extern void bnad_netdev_qstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); -extern void bnad_netdev_hwstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); +void bnad_netdev_qstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); +void bnad_netdev_hwstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); /* Debugfs */ -void bnad_debugfs_init(struct bnad *bnad); -void bnad_debugfs_uninit(struct 
bnad *bnad); +void bnad_debugfs_init(struct bnad *bnad); +void bnad_debugfs_uninit(struct bnad *bnad); /* MACROS */ /* To set & get the stats counters */ diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 78d6d6b970e..4fc5c8ef512 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -106,7 +106,6 @@ #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ #define XGMAC_ADDR_AE 0x80000000 -#define XGMAC_MAX_FILTER_ADDR 31 /* PMT Control and Status */ #define XGMAC_PMT_POINTER_RESET 0x80000000 @@ -384,6 +383,7 @@ struct xgmac_priv { struct device *device; struct napi_struct napi; + int max_macs; struct xgmac_extra_stats xstats; spinlock_t stats_lock; @@ -1060,13 +1060,13 @@ static int xgmac_stop(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); - netif_stop_queue(dev); - if (readl(priv->base + XGMAC_DMA_INTR_ENA)) napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_disable(dev); + /* Disable the MAC core */ xgmac_mac_disable(priv->base); @@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev) netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", netdev_mc_count(dev), netdev_uc_count(dev)); - if (dev->flags & IFF_PROMISC) { - writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); - return; - } + if (dev->flags & IFF_PROMISC) + value |= XGMAC_FRAME_FILTER_PR; memset(hash_filter, 0, sizeof(hash_filter)); - if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { + if (netdev_uc_count(dev) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; } @@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev) goto out; } - if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { + if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; } else { @@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) } out: - for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) - xgmac_set_mac_addr(ioaddr, NULL, reg); + for (i = reg; i <= priv->max_macs; i++) + xgmac_set_mac_addr(ioaddr, NULL, i); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); @@ -1372,11 +1370,8 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu) } old_mtu = dev->mtu; - dev->mtu = new_mtu; /* return early if the buffer sizes will not change */ - if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) - return 0; if (old_mtu == new_mtu) return 0; @@ -1384,8 +1379,9 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu) if (!netif_running(dev)) return 0; - /* Bring the interface down and then back up */ + /* Bring interface down, change mtu and bring interface back up */ xgmac_stop(dev); + dev->mtu = new_mtu; return xgmac_open(dev); } @@ -1761,6 +1757,13 @@ static int xgmac_probe(struct platform_device *pdev) uid = readl(priv->base + XGMAC_VERSION); netdev_info(ndev, "h/w version is 0x%x\n", uid); + /* Figure out how many valid mac address filter registers we have */ + writel(1, priv->base + XGMAC_ADDR_HIGH(31)); + if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) + priv->max_macs = 31; + else + priv->max_macs = 7; + writel(0, priv->base + XGMAC_DMA_INTR_ENA); ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq == -ENXIO) { diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h index 5ccbed1784d..8abb46b3903 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb/common.h +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap) return board_info(adap)->clock_core / 1000000; } -extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); -extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); -extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); -extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); - -extern void t1_interrupts_enable(adapter_t *adapter); -extern void t1_interrupts_disable(adapter_t *adapter); -extern void t1_interrupts_clear(adapter_t *adapter); -extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); -extern void t1_elmer0_ext_intr(adapter_t *adapter); -extern int t1_slow_intr_handler(adapter_t *adapter); - -extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); -extern const struct board_info *t1_get_board_info(unsigned int board_id); -extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid, +int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); +int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); + +void t1_interrupts_enable(adapter_t *adapter); +void t1_interrupts_disable(adapter_t *adapter); +void t1_interrupts_clear(adapter_t *adapter); +int t1_elmer0_ext_intr_handler(adapter_t *adapter); +void t1_elmer0_ext_intr(adapter_t *adapter); +int t1_slow_intr_handler(adapter_t *adapter); + +int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); +const struct board_info *t1_get_board_info(unsigned int board_id); +const struct board_info *t1_get_board_info_from_ids(unsigned int devid, unsigned short ssid); -extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); -extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, +int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); +int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, struct adapter_params *p); -extern int t1_init_hw_modules(adapter_t *adapter); -extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); -extern void t1_free_sw_modules(adapter_t *adapter); -extern void t1_fatal_err(adapter_t *adapter); -extern void t1_link_changed(adapter_t *adapter, int port_id); -extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, +int t1_init_hw_modules(adapter_t *adapter); +int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); +void t1_free_sw_modules(adapter_t *adapter); +void t1_fatal_err(adapter_t *adapter); +void t1_link_changed(adapter_t *adapter, int port_id); +void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, int speed, int duplex, int pause); #endif /* _CXGB_COMMON_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index d7048db9863..1d021059f09 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1168,7 +1168,6 @@ out_free_dev: pci_release_regions(pdev); out_disable_pdev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return err; } @@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); t1_sw_reset(pdev); } diff --git 
a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index 40c7b93abab..eb33a31b08a 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) { - memcpy(mac_addr, cmac->instance->mac_addr, 6); + memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN); return 0; } @@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) */ /* Store local copy */ - memcpy(cmac->instance->mac_addr, ma, 6); + memcpy(cmac->instance->mac_addr, ma, ETH_ALEN); lo = ((u32) ma[1] << 8) | (u32) ma[0]; mid = ((u32) ma[3] << 8) | (u32) ma[2]; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index b650951791d..45d77334d7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3374,7 +3374,6 @@ out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); out: return err; } @@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev) kfree(adapter); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h index 6990f6c6522..81029b872bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/regs.h +++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h @@ -685,10 +685,6 @@ #define V_BUSY(x) ((x) << S_BUSY) #define F_BUSY V_BUSY(1U) -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - #define A_MC7_EXT_MODE1 0x108 #define A_MC7_EXT_MODE2 0x10c @@ -749,14 +745,6 @@ #define A_MC7_CAL 0x128 -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - -#define S_BUSY 31 -#define V_BUSY(x) ((x) << S_BUSY) -#define F_BUSY V_BUSY(1U) - #define S_CAL_FAULT 30 #define V_CAL_FAULT(x) ((x) << S_CAL_FAULT) #define F_CAL_FAULT V_CAL_FAULT(1U) @@ -815,9 +803,6 @@ #define V_OP(x) ((x) << S_OP) #define F_OP V_OP(1U) -#define F_OP V_OP(1U) -#define A_SF_OP 0x6dc - #define A_MC7_BIST_ADDR_BEG 0x168 #define A_MC7_BIST_ADDR_END 0x16c @@ -830,8 +815,6 @@ #define V_CONT(x) ((x) << S_CONT) #define F_CONT V_CONT(1U) -#define F_CONT V_CONT(1U) - #define A_MC7_INT_ENABLE 0x178 #define S_AE 17 @@ -1017,8 +1000,6 @@ #define V_NICMODE(x) ((x) << S_NICMODE) #define F_NICMODE V_NICMODE(1U) -#define F_NICMODE V_NICMODE(1U) - #define S_IPV6ENABLE 15 #define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) #define F_IPV6ENABLE V_IPV6ENABLE(1U) @@ -1562,27 +1543,15 @@ #define A_ULPRX_STAG_ULIMIT 0x530 #define A_ULPRX_RQ_LLIMIT 0x534 -#define A_ULPRX_RQ_LLIMIT 0x534 #define A_ULPRX_RQ_ULIMIT 0x538 -#define A_ULPRX_RQ_ULIMIT 0x538 #define A_ULPRX_PBL_LLIMIT 0x53c #define A_ULPRX_PBL_ULIMIT 0x540 -#define A_ULPRX_PBL_ULIMIT 0x540 #define A_ULPRX_TDDP_TAGMASK 0x524 -#define A_ULPRX_RQ_LLIMIT 0x534 -#define A_ULPRX_RQ_LLIMIT 0x534 - -#define A_ULPRX_RQ_ULIMIT 0x538 -#define A_ULPRX_RQ_ULIMIT 0x538 - -#define A_ULPRX_PBL_ULIMIT 0x540 -#define A_ULPRX_PBL_ULIMIT 0x540 - #define A_ULPTX_CONFIG 0x580 #define S_CFG_CQE_SOP_MASK 1 @@ -2053,8 +2022,6 @@ #define V_TMMODE(x) ((x) << S_TMMODE) #define F_TMMODE V_TMMODE(1U) -#define F_TMMODE V_TMMODE(1U) - #define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c #define A_MC5_DB_FILTER_TABLE 0x710 @@ -2454,8 +2421,6 @@ 
#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE) #define F_TXACTENABLE V_TXACTENABLE(1U) -#define A_XGM_SERDES_CTRL0 0x8e0 - #define S_RESET3 23 #define V_RESET3(x) ((x) << S_RESET3) #define F_RESET3 V_RESET3(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 9c89dc8fe10..632b318eb38 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), - skb->tail - skb->transport_header, + skb_tail_pointer(skb) - + skb_transport_header(skb), adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index dfd1e36f575..6c930885045 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -48,15 +48,16 @@ #include <linux/vmalloc.h> #include <asm/io.h> #include "cxgb4_uld.h" -#include "t4_hw.h" -#define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 4 -#define FW_VERSION_MICRO 0 +#define T4FW_VERSION_MAJOR 0x01 +#define T4FW_VERSION_MINOR 0x06 +#define T4FW_VERSION_MICRO 0x18 +#define T4FW_VERSION_BUILD 0x00 -#define FW_VERSION_MAJOR_T5 0 -#define FW_VERSION_MINOR_T5 0 -#define FW_VERSION_MICRO_T5 0 +#define T5FW_VERSION_MAJOR 0x01 +#define T5FW_VERSION_MINOR 0x08 +#define T5FW_VERSION_MICRO 0x1C +#define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -241,6 +242,26 @@ struct pci_params { unsigned char width; }; +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + struct adapter_params { struct tp_params tp; struct vpd_params vpd; @@ -260,7 +281,7 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; - unsigned char rev; /* chip revision */ + enum chip_type chip; /* chip code */ unsigned char offload; unsigned char bypass; @@ -268,6 +289,23 @@ struct adapter_params { unsigned int ofldq_wr_cred; }; +#include "t4fw_api.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + + struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -513,25 +551,6 @@ struct sge { struct l2t_data; -#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) -#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) - -#define CHELSIO_T4 0x4 -#define CHELSIO_T5 0x5 - -enum chip_type 
{ - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), - T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, - - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, - T5_LAST_REV = T5_A1, -}; - #ifdef CONFIG_PCI_IOV /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial @@ -716,12 +735,12 @@ enum { static inline int is_t5(enum chip_type chip) { - return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; } static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) @@ -901,7 +920,11 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); -int t4_check_fw_version(struct adapter *adapter); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c73cabdbd4c..d6b12e035a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { { 0, } }; -#define FW_FNAME "cxgb4/t4fw.bin" +#define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" -#define FW_CFNAME "cxgb4/t4-config.txt" +#define FW4_CFNAME "cxgb4/t4-config.txt" #define FW5_CFNAME "cxgb4/t5-config.txt" MODULE_DESCRIPTION(DRV_DESC); @@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); -MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); /* @@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap); } /* - * Returns 0 if new FW was successfully loaded, a positive errno if a load was - * started but failed, and a negative errno if flash load couldn't start. 
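The chip-code rework above encodes family and stepping in a single byte, which is what lets is_t4()/is_t5() test the version nibble instead of maintaining fragile first/last-revision ranges. A small illustrative check (hypothetical helper; the values come from the definitions in this patch):

/* CHELSIO_CHIP_CODE(version, revision) = (version << 4) | revision, so:
 *   T5_A0 = (0x5 << 4) | 0 = 0x50, CHELSIO_CHIP_VERSION(0x50) = 0x5
 *   T4_A2 = (0x4 << 4) | 2 = 0x42, CHELSIO_CHIP_RELEASE(0x42) = 2
 * and any future T5 stepping satisfies is_t5() without table edits.
 */
static inline void chip_code_example(void)
{
	BUILD_BUG_ON(CHELSIO_CHIP_VERSION(T5_A0) != CHELSIO_T5);
	BUILD_BUG_ON(CHELSIO_CHIP_RELEASE(T4_A2) != 2);
}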
- */ -static int upgrade_fw(struct adapter *adap) -{ - int ret; - u32 vers, exp_major; - const struct fw_hdr *hdr; - const struct firmware *fw; - struct device *dev = adap->pdev_dev; - char *fw_file_name; - - switch (CHELSIO_CHIP_VERSION(adap->chip)) { - case CHELSIO_T4: - fw_file_name = FW_FNAME; - exp_major = FW_VERSION_MAJOR; - break; - case CHELSIO_T5: - fw_file_name = FW5_FNAME; - exp_major = FW_VERSION_MAJOR_T5; - break; - default: - dev_err(dev, "Unsupported chip type, %x\n", adap->chip); - return -EINVAL; - } - - ret = request_firmware(&fw, fw_file_name, dev); - if (ret < 0) { - dev_err(dev, "unable to load firmware image %s, error %d\n", - fw_file_name, ret); - return ret; - } - - hdr = (const struct fw_hdr *)fw->data; - vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { - ret = -EINVAL; /* wrong major version, won't do */ - goto out; - } - - /* - * If the flash FW is unusable or we found something newer, load it. - */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || - vers > adap->params.fw_vers) { - dev_info(dev, "upgrading firmware ...\n"); - ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, - /*force=*/false); - if (!ret) - dev_info(dev, - "firmware upgraded to version %pI4 from %s\n", - &hdr->fw_ver, fw_file_name); - else - dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); - } else { - /* - * Tell our caller that we didn't upgrade the firmware. - */ - ret = -EINVAL; - } - -out: release_firmware(fw); - return ret; -} - -/* * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. * The allocated memory is cleared. */ @@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset) static int get_regs_len(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) return T4_REGMAP_SIZE; else return T5_REGMAP_SIZE; @@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); val2 = t4_read_reg(adapter, SGE_STAT_MATCH); @@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return CHELSIO_CHIP_VERSION(ap->chip) | - (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, static const unsigned int *reg_ranges; int arr_size = 0, buf_size = 0; - if (is_t4(ap->chip)) { + if (is_t4(ap->params.chip)) { reg_ranges = &t4_reg_ranges[0]; arr_size = ARRAY_SIZE(t4_reg_ranges); buf_size = T4_REGMAP_SIZE; @@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap) size = t4_read_reg(adap, MA_EDRAM1_BAR); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); } - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); if (i & EXT_MEM_ENABLE) add_debugfs_mem(adap, "mc", MEM_MC, @@ -3419,7 +3353,7 @@ unsigned int 
cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3588,7 +3522,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs) do { v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3708,7 +3642,7 @@ static void process_db_drop(struct work_struct *work) adap = container_of(work, struct adapter, db_drop_task); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { disable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); drain_db_fifo(adap, 1); @@ -3753,7 +3687,7 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); @@ -3762,7 +3696,7 @@ void t4_db_full(struct adapter *adap) void t4_db_dropped(struct adapter *adap) { - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) queue_work(workq, &adap->db_drop_task); } @@ -3789,7 +3723,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; - lli.adapter_type = adap->params.rev; + lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> @@ -3983,6 +3917,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, struct net_device *event_dev; int ret = NOTIFY_DONE; struct bonding *bond = netdev_priv(ifa->idev->dev); + struct list_head *iter; struct slave *slave; struct pci_dev *first_pdev = NULL; @@ -3995,7 +3930,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, * in all of them only once. */ read_lock(&bond->lock); - bond_for_each_slave(bond, slave) { + bond_for_each_slave(bond, slave, iter) { if (!first_pdev) { ret = clip_add(slave->dev, ifa, event); /* If clip_add is success then only initialize @@ -4482,7 +4417,7 @@ static void setup_memwin(struct adapter *adap) u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mem_win0_base = bar0 + MEMWIN0_BASE; mem_win1_base = bar0 + MEMWIN1_BASE; mem_win2_base = bar0 + MEMWIN2_BASE; @@ -4667,8 +4602,10 @@ static int adap_init0_config(struct adapter *adapter, int reset) const struct firmware *cf; unsigned long mtype = 0, maddr = 0; u32 finiver, finicsum, cfcsum; - int ret, using_flash; + int ret; + int config_issued = 0; char *fw_config_file, fw_config_file_path[256]; + char *config_name = NULL; /* * Reset device if necessary. @@ -4685,9 +4622,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * then use that. Otherwise, use the configuration file stored * in the adapter flash ... 
*/ - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { case CHELSIO_T4: - fw_config_file = FW_CFNAME; + fw_config_file = FW4_CFNAME; break; case CHELSIO_T5: fw_config_file = FW5_CFNAME; @@ -4701,13 +4638,16 @@ static int adap_init0_config(struct adapter *adapter, int reset) ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); if (ret < 0) { - using_flash = 1; + config_name = "On FLASH"; mtype = FW_MEMTYPE_CF_FLASH; maddr = t4_flash_cfg_addr(adapter); } else { u32 params[7], val[7]; - using_flash = 0; + sprintf(fw_config_file_path, + "/lib/firmware/%s", fw_config_file); + config_name = fw_config_file_path; + if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { @@ -4775,6 +4715,26 @@ static int adap_init0_config(struct adapter *adapter, int reset) FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); + + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. + */ + if (ret == -ENOENT) { + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + config_name = "Firmware Default"; + } + + config_issued = 1; if (ret < 0) goto bye; @@ -4815,7 +4775,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; - sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); /* * Return successfully and note that we're operating with parameters * not supplied by the driver, rather than from hard-wired @@ -4823,11 +4782,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ adapter->flags |= USING_SOFT_PARAMS; dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ - "Configuration File %s, version %#x, computed checksum %#x\n", - (using_flash - ? "in device FLASH" - : fw_config_file_path), - finiver, cfcsum); + "Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); return 0; /* @@ -4836,9 +4792,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * want to issue a warning since this is fairly common.) 
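In short, adap_init0_config() now walks three configuration sources in order and records the one it used in config_name for the final log message: a file under /lib/firmware, then the copy in adapter FLASH, then (on -ENOENT from FW_CAPS_CONFIG_CMD) the default embedded in the firmware image itself. A condensed sketch of the first two steps (hypothetical helper; only the FW4_CFNAME macro and the request_firmware() call come from the patch):

static const char *example_pick_config(struct adapter *adapter,
				       const struct firmware **cf)
{
	/* prefer the filesystem copy, fall back to the FLASH copy */
	if (request_firmware(cf, FW4_CFNAME, adapter->pdev_dev) == 0)
		return "/lib/firmware/" FW4_CFNAME;
	*cf = NULL;
	return "On FLASH";	/* located via t4_flash_cfg_addr(adapter) */
}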
*/ bye: - if (ret != -ENOENT) - dev_warn(adapter->pdev_dev, "Configuration file error %d\n", - -ret); + if (config_issued && ret != -ENOENT) + dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", + config_name, -ret); return ret; } @@ -5085,6 +5041,47 @@ bye: return ret; } +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T4, + .fs_name = FW4_CFNAME, + .fw_mod_name = FW4_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T4, + .fw_ver = __cpu_to_be32(FW_VERSION(T4)), + .intfver_nic = FW_INTFVER(T4, NIC), + .intfver_vnic = FW_INTFVER(T4, VNIC), + .intfver_ri = FW_INTFVER(T4, RI), + .intfver_iscsi = FW_INTFVER(T4, ISCSI), + .intfver_fcoe = FW_INTFVER(T4, FCOE), + }, + }, { + .chip = CHELSIO_T5, + .fs_name = FW5_CFNAME, + .fw_mod_name = FW5_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. */ @@ -5122,44 +5119,54 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - ret = t4_check_fw_version(adap); - - /* The error code -EFAULT is returned by t4_check_fw_version() if - * firmware on adapter < supported firmware. If firmware on adapter - * is too old (not supported by driver) and we're the MASTER_PF set - * adapter state to DEV_STATE_UNINIT to force firmware upgrade - * and reinitialization. - */ - if ((adap->flags & MASTER_PF) && ret == -EFAULT) - state = DEV_STATE_UNINIT; + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { - if (ret == -EINVAL || ret == -EFAULT || ret > 0) { - if (upgrade_fw(adap) >= 0) { - /* - * Note that the chip was reset as part of the - * firmware upgrade so we don't reset it again - * below and grab the new firmware version. - */ - reset = 0; - ret = t4_check_fw_version(adap); - } else - if (ret == -EFAULT) { - /* - * Firmware is old but still might - * work if we force reinitialization - * of the adapter. Ignoring FW upgrade - * failure. 
- */ - dev_warn(adap->pdev_dev, - "Ignoring firmware upgrade " - "failure, and forcing driver " - "to reinitialize the " - "adapter.\n"); - ret = 0; - } + struct fw_info *fw_info; + struct fw_hdr *card_fw; + const struct firmware *fw; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); + if (fw_info == NULL) { + dev_err(adap->pdev_dev, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; + } + + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = t4_alloc_mem(sizeof(*card_fw)); + + /* Get FW from /lib/firmware/ */ + ret = request_firmware(&fw, fw_info->fw_mod_name, + adap->pdev_dev); + if (ret < 0) { + dev_err(adap->pdev_dev, + "unable to load firmware image %s, error %d\n", + fw_info->fw_mod_name, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, + state, &reset); + + /* Cleaning up */ + if (fw != NULL) + release_firmware(fw); + t4_free_mem(card_fw); + if (ret < 0) - return ret; + goto bye; } /* @@ -5244,7 +5251,7 @@ static int adap_init0(struct adapter *adap) if (ret == -ENOENT) { dev_info(adap->pdev_dev, "No Configuration File present " - "on adapter. Using hard-wired " + "on adapter. Using hard-wired " "configuration parameters.\n"); ret = adap_init0_no_config(adap, reset); } @@ -5786,7 +5793,7 @@ static void print_port_info(const struct net_device *dev) netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", adap->params.vpd.id, - CHELSIO_CHIP_RELEASE(adap->params.rev), buf, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? 
" MSI" : ""); @@ -5909,7 +5916,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_unmap_bar0; - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { s_qpp = QUEUESPERPAGEPF1 * adapter->fn; qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); @@ -6063,7 +6070,7 @@ sriov: out_free_dev: free_some_resources(adapter); out_unmap_bar: - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); out_unmap_bar0: iounmap(adapter->regs); @@ -6074,7 +6081,6 @@ sriov: pci_disable_device(pdev); out_release_regions: pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); return err; } @@ -6116,13 +6122,12 @@ static void remove_one(struct pci_dev *pdev) free_some_resources(adapter); iounmap(adapter->regs); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); } else pci_release_regions(pdev); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ac311f5f3eb..cc380c36e1a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) u32 val; if (q->pend_cred >= 8) { val = PIDX(q->pend_cred / 8); - if (!is_t4(adap->chip)) + if (!is_t4(adap->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | @@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(q->cntxt_id) | PIDX(n)); } else { @@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap, return 0; } - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) __skb_pull(skb, sizeof(struct cpl_trace_pkt)); else __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); @@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_rx_pkt *pkt; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); struct sge *s = &q->adap->sge; - int cpl_trace_pkt = is_t4(q->adap->chip) ? + int cpl_trace_pkt = is_t4(q->adap->params.chip) ? CPL_TRACE_PKT : CPL_TRACE_PKT_T5; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) @@ -2182,7 +2182,7 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->chip)) { + if (!is_t4(adap->params.chip)) { unsigned int s_qpp; unsigned short udb_density; unsigned long qpshift; @@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap) * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * and generate an interrupt when this occurs so we can recover. 
*/ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, V_HP_INT_THRESH(M_HP_INT_THRESH) | V_LP_INT_THRESH(M_LP_INT_THRESH), diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4cbb2f9850b..74a6fce5a15 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; u32 mc_bist_status_rdata, mc_bist_data_pattern; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mc_bist_cmd = MC_BIST_CMD; mc_bist_cmd_addr = MC_BIST_CMD_ADDR; mc_bist_cmd_len = MC_BIST_CMD_LEN; @@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); @@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) { int i; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* * Setup offset into PCIE memory window. Address must be a @@ -863,104 +863,169 @@ unlock: } /** - * get_fw_version - read the firmware version + * t4_get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ -static int get_fw_version(struct adapter *adapter, u32 *vers) +int t4_get_fw_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, fw_ver), 1, vers, 0); + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); } /** - * get_tp_version - read the TP microcode version + * t4_get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ -static int get_tp_version(struct adapter *adapter, u32 *vers) +int t4_get_tp_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + + return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } -/** - * t4_check_fw_version - check if the FW is compatible with this driver - * @adapter: the adapter - * - * Checks if an adapter's FW is compatible with the driver. Returns 0 - * if there's exact match, a negative error if the version could not be - * read or there's a major version mismatch, and a positive value if the - * expected major version is found but there's a minor version mismatch. +/* Is the given firmware API compatible with the one the driver was compiled + * with? 
*/ -int t4_check_fw_version(struct adapter *adapter) +static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) { - u32 api_vers[2]; - int ret, major, minor, micro; - int exp_major, exp_minor, exp_micro; - ret = get_fw_version(adapter, &adapter->params.fw_vers); - if (!ret) - ret = get_tp_version(adapter, &adapter->params.tp_vers); - if (!ret) - ret = t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, intfver_nic), - 2, api_vers, 1); - if (ret) - return ret; + /* short circuit if it's the exact same firmware version */ + if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) + return 1; - major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); - minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); - micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); +#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) + if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && + SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) + return 1; +#undef SAME_INTF - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { - case CHELSIO_T4: - exp_major = FW_VERSION_MAJOR; - exp_minor = FW_VERSION_MINOR; - exp_micro = FW_VERSION_MICRO; - break; - case CHELSIO_T5: - exp_major = FW_VERSION_MAJOR_T5; - exp_minor = FW_VERSION_MINOR_T5; - exp_micro = FW_VERSION_MICRO_T5; - break; - default: - dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", - adapter->chip); - return -EINVAL; - } + return 0; +} - memcpy(adapter->params.api_vers, api_vers, - sizeof(adapter->params.api_vers)); +/* The firmware in the filesystem is usable, but should it be installed? + * This routine explains itself in detail if it indicates the filesystem + * firmware should be installed. + */ +static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, + int k, int c) +{ + const char *reason; - if (major < exp_major || (major == exp_major && minor < exp_minor) || - (major == exp_major && minor == exp_minor && micro < exp_micro)) { - dev_err(adapter->pdev_dev, - "Card has firmware version %u.%u.%u, minimum " - "supported firmware is %u.%u.%u.\n", major, minor, - micro, exp_major, exp_minor, exp_micro); - return -EFAULT; + if (!card_fw_usable) { + reason = "incompatible or unusable"; + goto install; } - if (major != exp_major) { /* major mismatch - fail */ - dev_err(adapter->pdev_dev, - "card FW has major version %u, driver wants %u\n", - major, exp_major); - return -EINVAL; + if (k > c) { + reason = "older than the version supported with this driver"; + goto install; } - if (minor == exp_minor && micro == exp_micro) - return 0; /* perfect match */ + return 0; + +install: + dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " + "installing firmware %u.%u.%u.%u on card.\n", + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); - /* Minor/micro version mismatch. Report it but often it's OK. 
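The bare k > c comparison in should_install_fs_fw() is sound because fw_ver packs all four version components into one word, so the numeric order of the be32_to_cpu() values is the version order. A worked example under the field layout the FW_HDR_FW_VER_*_GET accessors assume here (8 bits per component, major in the top byte):

static inline u32 example_fw_ver_word(u8 major, u8 minor, u8 micro, u8 build)
{
	/* hypothetical packer mirroring the assumed fw_hdr.fw_ver layout */
	return (major << 24) | (minor << 16) | (micro << 8) | build;
}
/* T4 firmware 1.6.24.0 -> 0x01061800; an older 1.4.0.0 -> 0x01040000;
 * 0x01061800 > 0x01040000, so the filesystem image gets installed. */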
*/ return 1; } +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, + int *reset) +{ + int ret, card_fw_usable, fs_fw_usable; + const struct fw_hdr *fs_fw; + const struct fw_hdr *drv_fw; + + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ + ret = -t4_read_flash(adap, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { + card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); + } else { + dev_err(adap->pdev_dev, + "Unable to read card's firmware header: %d\n", ret); + card_fw_usable = 0; + } + + if (fw_data != NULL) { + fs_fw = (const void *)fw_data; + fs_fw_usable = fw_compatible(drv_fw, fs_fw); + } else { + fs_fw = NULL; + fs_fw_usable = 0; + } + + if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && + (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { + /* Common case: the firmware on the card is an exact match and + * the filesystem one is an exact match too, or the filesystem + * one is absent/incompatible. + */ + } else if (fs_fw_usable && state == DEV_STATE_UNINIT && + should_install_fs_fw(adap, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { + ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); + if (ret != 0) { + dev_err(adap->pdev_dev, + "failed to install firmware: %d\n", ret); + goto bye; + } + + /* Installed successfully, update the cached header too. */ + memcpy(card_fw, fs_fw, sizeof(*card_fw)); + card_fw_usable = 1; + *reset = 0; /* already reset as part of load_fw */ + } + + if (!card_fw_usable) { + uint32_t d, c, k; + + d = be32_to_cpu(drv_fw->fw_ver); + c = be32_to_cpu(card_fw->fw_ver); + k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; + + dev_err(adap->pdev_dev, "Cannot find a usable firmware: " + "chip state %d, " + "driver compiled with %d.%d.%d.%d, " + "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", + state, + FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), + FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + ret = EINVAL; + goto bye; + } + + /* We're using whatever's on the card and it's known to be good. */ + adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); + adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); + +bye: + return ret; +} + /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter @@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter) PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, - is_t4(adapter->chip) ? + is_t4(adapter->params.chip) ? pcie_intr_info : t5_pcie_intr_info); if (fat) @@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port) { u32 v, int_cause_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); else int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); @@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) #define GET_STAT(name) \ t4_read_reg64(adap, \ - (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ + (is_t4(adap->params.chip) ? 
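Several hunks above (xgmac_intr_handler(), the GET_STAT() macro being rewritten here) repeat one idiom: select the T4 or the T5 per-port register block at run time via is_t4(adap->params.chip), because the two chips place the same MAC registers at different offsets. A simplified sketch of that selection; the base and stride values are made-up stand-ins, not the real PORT_REG()/T5_PORT_REG() definitions:

#include <stdbool.h>
#include <stdint.h>

/* stand-in register map constants, for illustration only */
#define T4_PORT_BASE   0x8000u
#define T4_PORT_STRIDE 0x800u
#define T5_PORT_BASE   0x30000u
#define T5_PORT_STRIDE 0x4000u

static uint32_t port_reg_addr(bool t4_chip, unsigned int port, uint32_t reg)
{
        /* same shape as: is_t4(...) ? PORT_REG(...) : T5_PORT_REG(...) */
        if (t4_chip)
                return T4_PORT_BASE + port * T4_PORT_STRIDE + reg;
        return T5_PORT_BASE + port * T5_PORT_STRIDE + reg;
}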
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) @@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, { u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); @@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, int i; u32 port_cfg_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); else port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); @@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, return -EINVAL; #define EPIO_REG(name) \ - (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ + (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ T5_PORT_REG(port, MAC_PORT_EPIO_##name)) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); @@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) { int i, off; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* Align on a 2KB boundary. */ @@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, int i, ret; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p; - unsigned int max_naddr = is_t4(adap->chip) ? + unsigned int max_naddr = is_t4(adap->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; - unsigned int max_mac_addr = is_t4(adap->chip) ? + unsigned int max_mac_addr = is_t4(adap->params.chip) ? 
NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter) { int ret, ver; uint16_t device_id; + u32 pl_rev; ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); - adapter->params.rev = t4_read_reg(adapter, PL_REV); + pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); ret = get_flash_params(adapter); if (ret < 0) { @@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter) */ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); ver = device_id >> 12; + adapter->params.chip = 0; switch (ver) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", @@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter) return -EINVAL; } - /* Reassign the updated revision field */ - adapter->params.rev = adapter->chip; - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index ef146c0ba48..0a8205d69d2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1092,6 +1092,11 @@ #define PL_REV 0x1943c +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) + #define LE_DB_CONFIG 0x19c04 #define HASHEN 0x00100000U @@ -1199,4 +1204,13 @@ #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) +#define A_PL_VF_REV 0x4 +#define A_PL_VF_WHOAMI 0x0 +#define A_PL_VF_REVISION 0x8 + +#define S_CHIPID 4 +#define M_CHIPID 0xfU +#define V_CHIPID(x) ((x) << S_CHIPID) +#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6f77ac48774..74fea74ce0a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2157,7 +2157,7 @@ struct fw_debug_cmd { struct fw_hdr { u8 ver; - u8 reserved1; + u8 chip; /* terminator chip type */ __be16 len512; /* bin length in units of 512-bytes */ __be32 fw_ver; /* firmware version */ __be32 tp_microcode_ver; @@ -2176,6 +2176,11 @@ struct fw_hdr { __be32 reserved6[23]; }; +enum fw_hdr_chip { + FW_HDR_CHIP_T4, + FW_HDR_CHIP_T5 +}; + #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index be5c7ef6ca9..68eaa9c88c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -344,7 +344,6 @@ struct adapter { unsigned long registered_device_map; unsigned long open_device_map; unsigned long flags; - enum chip_type chip; struct adapter_params params; /* queue and interrupt resources */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 40c22e7de15..0899c098359 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) /* * Chip version 4, revision 0x3f (cxgb4vf). */ - return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); + return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); } /* @@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev, reg_block_dump(adapter, regbuf, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); + + /* T5 adds new registers in the PL Register map. + */ reg_block_dump(adapter, regbuf, T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, - T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); + T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) + ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); reg_block_dump(adapter, regbuf, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); @@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; + unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter) return err; } + adapter->params.chip = 0; switch (adapter->pdev->device >> 12) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); break; } @@ -2782,11 +2789,9 @@ err_unmap_bar: err_free_adapter: kfree(adapter); - pci_set_drvdata(pdev, NULL); err_release_regions: pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); pci_clear_master(pdev); err_disable_device: @@ -2851,7 +2856,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) } iounmap(adapter->regs); kfree(adapter); - pci_set_drvdata(pdev, NULL); } /* @@ -2908,7 +2912,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) #define CH_DEVICE(devid, idx) \ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } -static struct pci_device_id cxgb4vf_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = { CH_DEVICE(0xb000, 0), /* PE10K FPGA */ CH_DEVICE(0x4800, 0), /* T440-dbg */ CH_DEVICE(0x4801, 0), /* T420-cr */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index df296af20bd..0a89963c48c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, @@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb, * Builds an sk_buff from the given packet gather list. Returns the * sk_buff or %NULL if sk_buff allocation failed. */ -struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, - unsigned int skb_len, unsigned int pull_len) +static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, + unsigned int pull_len) { struct sk_buff *skb; @@ -1443,7 +1444,7 @@ out: * Releases the pages of a packet gather list. 
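In the ring_fl_db() hunk above, non-T4 chips OR DBTYPE(1) into the doorbell word before writing SGE_VF_KDOORBELL, because T5 doorbells carry a type field alongside the producer-index increment. A sketch of that composition; the bit positions here are illustrative assumptions, not copied from t4_regs.h:

#include <linux/types.h>

#define EX_PIDX(x)   ((x) << 0)    /* producer-index increment */
#define EX_DBTYPE(x) ((x) << 13)   /* doorbell type, T5+ (assumed shift) */

static u32 fl_doorbell_val(bool t4_chip, unsigned int cred_units)
{
        u32 val = EX_PIDX(cred_units);

        if (!t4_chip)
                val |= EX_DBTYPE(1);    /* tag as a PIDX doorbell on T5 */
        return val;
}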
We do not own the last * page on the list and do not free it. */ -void t4vf_pktgl_free(const struct pkt_gl *gl) +static void t4vf_pktgl_free(const struct pkt_gl *gl) { int frag; @@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq) * on this queue. If the system is under memory shortage use a fairly * long delay to help recovery. */ -int process_responses(struct sge_rspq *rspq, int budget) +static int process_responses(struct sge_rspq *rspq, int budget) { struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); int budget_left = budget; @@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter) * The MSI interrupt handler handles data events from SGE response queues as * well as error and other async events as they all use the same MSI vector. */ -irqreturn_t t4vf_intr_msi(int irq, void *cookie) +static irqreturn_t t4vf_intr_msi(int irq, void *cookie) { struct adapter *adapter = cookie; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 53cbfed21d0..61362450d05 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -39,21 +39,28 @@ #include "../cxgb4/t4fw_api.h" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) +/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. or + * = "a" for T4 FPGA; "b" for T4 FPGA, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + */ #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 enum chip_type { - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, + T4_LAST_REV = T4_A2, - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, }; @@ -203,6 +210,7 @@ struct adapter_params { struct vpd_params vpd; /* Vital Product Data */ struct rss_params rss; /* Receive Side Scaling */ struct vf_resources vfres; /* Virtual Function Resource limits */ + enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ }; @@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } int t4vf_wait_dev_ready(struct adapter *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9f96dc3bb11..d958c44341b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? 
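The enum chip_type rework above depends on the code layout defined at the top of that hunk: one byte, chip version in the high nibble, silicon revision in the low nibble. That is why is_t4() can now test CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4 instead of checking a revision range that breaks whenever a new stepping is added. Restated as a standalone sketch:

#include <stdint.h>

#define CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHIP_VERSION(code)           (((code) >> 4) & 0xf)
#define CHIP_RELEASE(code)           ((code) & 0xf)

/* mirrors the reworked is_t4(): any T4 revision matches */
static int chip_is_t4(uint8_t chip_code)
{
        return CHIP_VERSION(chip_code) == 0x4;  /* CHELSIO_T4 */
}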
NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 7b756cf9474..ff78dfaec50 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2309,7 +2309,6 @@ err_out_release_regions: err_out_disable_device: pci_disable_device(pdev); err_out_free_netdev: - pci_set_drvdata(pdev, NULL); free_netdev(netdev); return err; @@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev) enic_iounmap(enic); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(netdev); } } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 5f5896e522d..7080ad6c401 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev) /* DM9000 network board routine ---------------------------- */ -static void -dm9000_reset(board_info_t * db) -{ - dev_dbg(db->dev, "resetting device\n"); - - /* RESET device */ - writeb(DM9000_NCR, db->io_addr); - udelay(200); - writeb(NCR_RST, db->io_data); - udelay(200); -} - /* * Read a byte from I/O port */ @@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value) writeb(value, db->io_data); } +static void +dm9000_reset(board_info_t *db) +{ + dev_dbg(db->dev, "resetting device\n"); + + /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29 + * The essential point is that we have to do a double reset, and the + * instruction is to set LBK into MAC internal loopback mode. + */ + iow(db, DM9000_NCR, 0x03); + udelay(100); /* Application note says at least 20 us */ + if (ior(db, DM9000_NCR) & 1) + dev_err(db->dev, "dm9000 did not respond to first reset\n"); + + iow(db, DM9000_NCR, 0); + iow(db, DM9000_NCR, 0x03); + udelay(100); + if (ior(db, DM9000_NCR) & 1) + dev_err(db->dev, "dm9000 did not respond to second reset\n"); +} + /* routines for sending block to chip */ static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) @@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = { static void dm9000_show_carrier(board_info_t *db, unsigned carrier, unsigned nsr) { + int lpa; struct net_device *ndev = db->ndev; + struct mii_if_info *mii = &db->mii; unsigned ncr = dm9000_read_locked(db, DM9000_NCR); - if (carrier) - dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", + if (carrier) { + lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); + dev_info(db->dev, + "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n", ndev->name, (nsr & NSR_SPEED) ? 10 : 100, - (ncr & NCR_FDX) ? "full" : "half"); - else + (ncr & NCR_FDX) ? "full" : "half", lpa); + } else { dev_info(db->dev, "%s: link down\n", ndev->name); + } } static void @@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev) (dev->features & NETIF_F_RXCSUM) ? 
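The pci_set_drvdata(pdev, NULL) deletions repeated through the enic and tulip hunks all follow from the same rule: the driver core clears the drvdata pointer after ->remove() returns, so drivers no longer need to do it themselves. A hypothetical remove path showing the resulting shape (the function name is illustrative, not from any of these drivers):

#include <linux/pci.h>
#include <linux/netdevice.h>

static void example_remove(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        unregister_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        /* no pci_set_drvdata(pdev, NULL): the driver core clears it */
        free_netdev(dev);
}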
RCSR_CSUM : 0); iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ + iow(db, DM9000_GPR, 0); - dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ - dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ + /* If we are dealing with DM9000B, some extra steps are required: a + * manual phy reset, and setting init params. + */ + if (db->type == TYPE_DM9000B) { + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); + } ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; @@ -1603,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev) if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { mac_src = "platform data"; - memcpy(ndev->dev_addr, pdata->dev_addr, 6); + memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN); } if (!is_valid_ether_addr(ndev->dev_addr)) { diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index eaab73cf27c..38148b0e3a9 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev) iounmap(de->regs); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 263b92c00cb..c05b66dfcc3 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev) pci_disable_device (pdev); } -static struct pci_device_id de4x5_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 83139307861..5ad9e3e3c0b 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -523,7 +523,6 @@ err_out_res: err_out_disable: pci_disable_device(pdev); err_out_free: - pci_set_drvdata(pdev, NULL); free_netdev(dev); return err; @@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev) db->buf_pool_ptr, db->buf_pool_dma_ptr); pci_release_regions(pdev); free_netdev(dev); /* free board information */ - - pci_set_drvdata(pdev, NULL); } DMFE_DBUG(0, "dmfe_remove_one() exit", 0); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 4e8cfa2ac80..add05f14b38 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); - pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 93845afe1ce..a5397b13072 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -429,7 +429,6 @@ err_out_release: err_out_disable: pci_disable_device(pdev); err_out_free: - pci_set_drvdata(pdev, NULL); free_netdev(dev); return err; @@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev) db->buf_pool_ptr, db->buf_pool_dma_ptr); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); } diff --git 
a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index c7b04ecf5b4..62fe512bb21 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_out_cleardev: - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions(pdev); @@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev) pci_iounmap(pdev, np->base_addr); free_netdev(dev); } - - pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index 9b84cb04fe5..ab7ebac6fbe 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -289,7 +289,6 @@ out: err_unmap: pci_iounmap(pdev, private->ioaddr); reg_fail: - pci_set_drvdata(pdev, NULL); dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle); tx_buf_fail: dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle); @@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev) unregister_netdev(dev); pci_iounmap(pdev, card->ioaddr); - pci_set_drvdata(pdev, NULL); dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle); dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle); free_netdev(dev); diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index afa8e3af2c4..4fb756d219f 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev) pci_release_regions (pdev); pci_disable_device (pdev); } - pci_set_drvdata (pdev, NULL); } static struct pci_driver rio_driver = { diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index bf3bf6f22c9..113cd799a13 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -703,7 +703,6 @@ err_out_unmap_tx: dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_cleardev: - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_res: pci_release_regions(pdev); @@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev) pci_iounmap(pdev, np->base); pci_release_regions(pdev); free_netdev(dev); - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index db020230bd0..5878df619b5 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "4.9.134.0u" +#define DRV_VER "4.9.224.0u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define BE_NUM_VLANS_SUPPORTED 64 #define BE_UMC_NUM_VLANS_SUPPORTED 15 -#define BE_MAX_EQD 96u +#define BE_MAX_EQD 128u #define BE_MAX_TX_FRAG_COUNT 30 #define EVNT_Q_LEN 1024 @@ -199,8 +199,37 @@ struct be_eq_obj { u16 spurious_intr; struct napi_struct napi; struct be_adapter *adapter; + +#ifdef CONFIG_NET_RX_BUSY_POLL +#define BE_EQ_IDLE 0 +#define BE_EQ_NAPI 1 /* napi owns this EQ */ +#define BE_EQ_POLL 2 /* poll owns this EQ */ +#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL) +#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */ +#define BE_EQ_POLL_YIELD 8 /* 
poll yielded this EQ */ +#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) +#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD) + unsigned int state; + spinlock_t lock; /* lock to serialize napi and busy-poll */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ } ____cacheline_aligned_in_smp; +struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + bool enable; + u32 min_eqd; /* in usecs */ + u32 max_eqd; /* in usecs */ + u32 prev_eqd; /* in usecs */ + u32 et_eqd; /* configured val when aic is off */ + ulong jiffies; + u64 rx_pkts_prev; /* Used to calculate RX pps */ + u64 tx_reqs_prev; /* Used to calculate TX pps */ +}; + +enum { + NAPI_POLLING, + BUSY_POLLING +}; + struct be_mcc_obj { struct be_queue_info q; struct be_queue_info cq; @@ -215,6 +244,7 @@ struct be_tx_stats { u64 tx_compl; ulong tx_jiffies; u32 tx_stops; + u32 tx_drv_drops; /* pkts dropped by driver */ struct u64_stats_sync sync; struct u64_stats_sync sync_compl; }; @@ -239,15 +269,12 @@ struct be_rx_page_info { struct be_rx_stats { u64 rx_bytes; u64 rx_pkts; - u64 rx_pkts_prev; - ulong rx_jiffies; u32 rx_drops_no_skbs; /* skb allocation errors */ u32 rx_drops_no_frags; /* HW has no fetched frags */ u32 rx_post_fail; /* page post alloc failures */ u32 rx_compl; u32 rx_mcast_pkts; u32 rx_compl_err; /* completions with err set */ - u32 rx_pps; /* pkts per second */ struct u64_stats_sync sync; }; @@ -316,6 +343,11 @@ struct be_drv_stats { u32 rx_input_fifo_overflow_drop; u32 pmem_fifo_overflow_drop; u32 jabber_events; + u32 rx_roce_bytes_lsd; + u32 rx_roce_bytes_msd; + u32 rx_roce_frames; + u32 roce_drops_payload_len; + u32 roce_drops_crc; }; struct be_vf_cfg { @@ -405,6 +437,7 @@ struct be_adapter { u32 big_page_size; /* Compounded page size shared by rx wrbs */ struct be_drv_stats drv_stats; + struct be_aic_obj aic_obj[MAX_EVT_QS]; u16 vlans_added; u8 vlan_tag[VLAN_N_VID]; u8 vlan_prio_bmap; /* Available Priority BitMap */ @@ -437,7 +470,6 @@ struct be_adapter { u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ bool stats_cmd_sent; - u32 if_type; struct { u32 size; u32 total_size; @@ -471,9 +503,10 @@ struct be_adapter { }; #define be_physfn(adapter) (!adapter->virtfn) +#define be_virtfn(adapter) (adapter->virtfn) #define sriov_enabled(adapter) (adapter->num_vfs > 0) -#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \ - be_physfn(adapter)) +#define sriov_want(adapter) (be_physfn(adapter) && \ + (num_vfs || pci_num_vf(adapter->pdev))) #define for_all_vfs(adapter, vf_cfg, i) \ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ i++, vf_cfg++) @@ -546,6 +579,10 @@ extern const struct ethtool_ops be_ethtool_ops; for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \ i++, eqo++) +#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \ + for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ + i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) + #define is_mcc_eqo(eqo) (eqo->idx == 0) #define mcc_eqo(adapter) (&adapter->eq_obj[0]) @@ -696,27 +733,137 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD; } -extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, - u16 num_popped); -extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); -extern void be_parse_stats(struct be_adapter *adapter); -extern int be_load_fw(struct be_adapter *adapter, u8 *func); -extern bool be_is_wol_supported(struct be_adapter *adapter); -extern bool 
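struct be_aic_obj above keeps rx_pkts_prev, tx_reqs_prev and a jiffies stamp precisely so the EQ-delay worker can turn counter deltas into a packets-per-second rate. A sketch of that sampling step; the helper name and exact rounding are assumptions, not the driver's code:

#include <linux/jiffies.h>
#include <linux/math64.h>

static u32 sample_pps(u64 pkts, u64 pkts_prev, unsigned long now,
                      unsigned long then)
{
        unsigned long delta_ms = jiffies_to_msecs(now - then);

        if (!delta_ms)
                return 0;       /* back-to-back samples: no rate yet */
        return (u32)div64_u64((pkts - pkts_prev) * 1000, delta_ms);
}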
be_pause_supported(struct be_adapter *adapter); -extern u32 be_get_fw_log_level(struct be_adapter *adapter); +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ + bool status = true; + + spin_lock(&eqo->lock); /* BH is already disabled */ + if (eqo->state & BE_EQ_LOCKED) { + WARN_ON(eqo->state & BE_EQ_NAPI); + eqo->state |= BE_EQ_NAPI_YIELD; + status = false; + } else { + eqo->state = BE_EQ_NAPI; + } + spin_unlock(&eqo->lock); + return status; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ + spin_lock(&eqo->lock); /* BH is already disabled */ + + WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); + eqo->state = BE_EQ_IDLE; + + spin_unlock(&eqo->lock); +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ + bool status = true; + + spin_lock_bh(&eqo->lock); + if (eqo->state & BE_EQ_LOCKED) { + eqo->state |= BE_EQ_POLL_YIELD; + status = false; + } else { + eqo->state |= BE_EQ_POLL; + } + spin_unlock_bh(&eqo->lock); + return status; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ + spin_lock_bh(&eqo->lock); + + WARN_ON(eqo->state & (BE_EQ_NAPI)); + eqo->state = BE_EQ_IDLE; + + spin_unlock_bh(&eqo->lock); +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ + spin_lock_init(&eqo->lock); + eqo->state = BE_EQ_IDLE; +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ + local_bh_disable(); + + /* It's enough to just acquire napi lock on the eqo to stop + * be_busy_poll() from processing any queueus. + */ + while (!be_lock_napi(eqo)) + mdelay(1); + + local_bh_enable(); +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ + +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ + return true; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ + return false; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, + u16 num_popped); +void be_link_status_update(struct be_adapter *adapter, u8 link_status); +void be_parse_stats(struct be_adapter *adapter); +int be_load_fw(struct be_adapter *adapter, u8 *func); +bool be_is_wol_supported(struct be_adapter *adapter); +bool be_pause_supported(struct be_adapter *adapter); +u32 be_get_fw_log_level(struct be_adapter *adapter); + +static inline int fw_major_num(const char *fw_ver) +{ + int fw_major = 0; + + sscanf(fw_ver, "%d.", &fw_major); + + return fw_major; +} + int be_update_queues(struct be_adapter *adapter); int be_poll(struct napi_struct *napi, int budget); /* * internal function to initialize-cleanup roce device. */ -extern void be_roce_dev_add(struct be_adapter *); -extern void be_roce_dev_remove(struct be_adapter *); +void be_roce_dev_add(struct be_adapter *); +void be_roce_dev_remove(struct be_adapter *); /* * internal function to open-close roce device during ifup-ifdown. 
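Taken together, the lock helpers above implement a small ownership protocol: BE_EQ_NAPI and BE_EQ_POLL mark who currently holds the event queue, and the *_YIELD bits record a lost race so the owner knows someone else wanted in. A hypothetical NAPI-side caller, assuming be.h is in scope, just to show the intended usage:

static int example_napi_poll(struct be_eq_obj *eqo, int budget)
{
        int work = 0;

        if (!be_lock_napi(eqo))
                return budget;  /* busy-poll owns the EQ; retry later */

        /* ... drain completion queues, accumulating into work ... */

        be_unlock_napi(eqo);
        return work;
}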
*/ -extern void be_roce_dev_open(struct be_adapter *); -extern void be_roce_dev_close(struct be_adapter *); +void be_roce_dev_open(struct be_adapter *); +void be_roce_dev_close(struct be_adapter *); #endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index bd0e0c0bbcd..e0e8bc1ef14 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -522,7 +522,7 @@ static u16 be_POST_stage_get(struct be_adapter *adapter) return sem & POST_STAGE_MASK; } -int lancer_wait_ready(struct be_adapter *adapter) +static int lancer_wait_ready(struct be_adapter *adapter) { #define SLIPORT_READY_TIMEOUT 30 u32 sliport_status; @@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, } else { req->hdr.version = 2; req->page_size = 1; /* 1 for 4K */ + + /* coalesce-wm field in this cmd is not relevant to Lancer. + * Lancer uses COMMON_MODIFY_CQ to set this field + */ + if (!lancer_chip(adapter)) + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, @@ -1198,7 +1205,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) if (lancer_chip(adapter)) { req->hdr.version = 1; - req->if_id = cpu_to_le16(adapter->if_handle); } else if (BEx_chip(adapter)) { if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) req->hdr.version = 2; @@ -1206,6 +1212,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) req->hdr.version = 2; } + if (req->hdr.version > 0) + req->if_id = cpu_to_le16(adapter->if_handle); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->ulp_num = BE_ULP1_NUM; req->type = BE_ETH_TX_RING_TYPE_STANDARD; @@ -1435,8 +1443,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); /* version 1 of the cmd is not supported only by BE2 */ - if (!BE2_chip(adapter)) + if (BE2_chip(adapter)) + hdr->version = 0; + if (BE3_chip(adapter) || lancer_chip(adapter)) hdr->version = 1; + else + hdr->version = 2; be_mcc_notify(adapter); adapter->stats_cmd_sent = true; @@ -1718,11 +1730,12 @@ err: /* set the EQ delay interval of an EQ to specified value * Uses async mcc */ -int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, + int num) { struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; - int status = 0; + int status = 0, i; spin_lock_bh(&adapter->mcc_lock); @@ -1736,13 +1749,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); - req->num_eq = cpu_to_le32(1); - req->delay[0].eq_id = cpu_to_le32(eq_id); - req->delay[0].phase = 0; - req->delay[0].delay_multiplier = cpu_to_le32(eqd); + req->num_eq = cpu_to_le32(num); + for (i = 0; i < num; i++) { + req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); + req->set_eqd[i].phase = 0; + req->set_eqd[i].delay_multiplier = + cpu_to_le32(set_eqd[i].delay_multiplier); + } be_mcc_notify(adapter); - err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1750,7 +1765,7 @@ err: /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool 
untagged, bool promiscuous) + u32 num, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1770,7 +1785,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, req->interface_id = if_id; req->promiscuous = promiscuous; - req->untagged = untagged; + req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, @@ -1839,7 +1854,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } + if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != + req->if_flags_mask) { + dev_warn(&adapter->pdev->dev, + "Cannot set rx filter flags 0x%x\n", + req->if_flags_mask); + dev_warn(&adapter->pdev->dev, + "Interface is capable of 0x%x flags only\n", + be_if_cap_flags(adapter)); + } + req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); + status = be_mcc_notify_wait(adapter); + err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -3519,7 +3546,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) struct be_cmd_enable_disable_vf *req; int status; - if (!lancer_chip(adapter)) + if (BEx_chip(adapter)) return 0; spin_lock_bh(&adapter->mcc_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 108ca8abf0a..0075686276a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control { } __packed; /******************** Modify EQ Delay *******************/ +struct be_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +}; + struct be_cmd_req_modify_eq_delay { struct be_cmd_req_hdr hdr; u32 num_eq; - struct { - u32 eq_id; - u32 phase; - u32 delay_multiplier; - } delay[8]; + struct be_set_eqd set_eqd[MAX_EVT_QS]; } __packed; struct be_cmd_resp_modify_eq_delay { @@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 { u32 rsvd[4]; }; +struct be_port_rxf_stats_v2 { + u32 rsvd0[10]; + u32 roce_bytes_received_lsd; + u32 roce_bytes_received_msd; + u32 rsvd1[5]; + u32 roce_frames_received; + u32 rx_crc_errors; + u32 rx_alignment_symbol_errors; + u32 rx_pause_frames; + u32 rx_priority_pause_frames; + u32 rx_control_frames; + u32 rx_in_range_errors; + u32 rx_out_range_errors; + u32 rx_frame_too_long; + u32 rx_address_filtered; + u32 rx_dropped_too_small; + u32 rx_dropped_too_short; + u32 rx_dropped_header_too_small; + u32 rx_dropped_tcp_length; + u32 rx_dropped_runt; + u32 rsvd2[10]; + u32 rx_ip_checksum_errs; + u32 rx_tcp_checksum_errs; + u32 rx_udp_checksum_errs; + u32 rsvd3[7]; + u32 rx_switched_unicast_packets; + u32 rx_switched_multicast_packets; + u32 rx_switched_broadcast_packets; + u32 rsvd4[3]; + u32 tx_pauseframes; + u32 tx_priority_pauseframes; + u32 tx_controlframes; + u32 rsvd5[10]; + u32 rxpp_fifo_overflow_drop; + u32 rx_input_fifo_overflow_drop; + u32 pmem_fifo_overflow_drop; + u32 jabber_events; + u32 rsvd6[3]; + u32 rx_drops_payload_size; + u32 rx_drops_clipped_header; + u32 rx_drops_crc; + u32 roce_drops_payload_len; + u32 roce_drops_crc; + u32 rsvd7[19]; +}; + +struct be_rxf_stats_v2 { + struct be_port_rxf_stats_v2 port[4]; + u32 rsvd0[2]; + u32 rx_drops_no_pbuf; + u32 rx_drops_no_txpb; + u32 rx_drops_no_erx_descr; + u32 rx_drops_no_tpre_descr; + u32 rsvd1[6]; + u32 rx_drops_too_many_frags; + u32 rx_drops_invalid_ring; + u32 forwarded_packets; + u32 rx_drops_mtu; + u32 rsvd2[35]; +}; + 
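With the fixed delay[8] array replaced by set_eqd[MAX_EVT_QS], be_cmd_modify_eqd() now takes a caller-built batch and issues one MCC command for all event queues instead of one command per EQ. A hypothetical caller sketch; the 65/100 usec-to-multiplier scaling is an assumption about the firmware's delay units, not something shown in these hunks:

static void example_update_all_eqds(struct be_adapter *adapter,
                                    const u32 *new_eqd)
{
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        struct be_eq_obj *eqo;
        int i, num = 0;

        for_all_evt_queues(adapter, eqo, i) {
                set_eqd[num].eq_id = eqo->q.id;
                set_eqd[num].delay_multiplier = (new_eqd[i] * 65) / 100;
                num++;
        }

        if (num)
                be_cmd_modify_eqd(adapter, set_eqd, num);
}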
struct be_hw_stats_v1 { struct be_rxf_stats_v1 rxf; u32 rsvd0[BE_TXP_SW_SZ]; @@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 { struct be_hw_stats_v1 hw_stats; }; +struct be_erx_stats_v2 { + u32 rx_drops_no_fragments[136]; /* dwordS 0 to 135*/ + u32 rsvd[3]; +}; + +struct be_hw_stats_v2 { + struct be_rxf_stats_v2 rxf; + u32 rsvd0[BE_TXP_SW_SZ]; + struct be_erx_stats_v2 erx; + struct be_pmem_stats pmem; + u32 rsvd1[18]; +}; + +struct be_cmd_req_get_stats_v2 { + struct be_cmd_req_hdr hdr; + u8 rsvd[sizeof(struct be_hw_stats_v2)]; +}; + +struct be_cmd_resp_get_stats_v2 { + struct be_cmd_resp_hdr hdr; + struct be_hw_stats_v2 hw_stats; +}; + /************** get fat capabilites *******************/ #define MAX_MODULES 27 #define MAX_MODES 4 @@ -1865,137 +1951,119 @@ struct be_cmd_resp_get_iface_list { struct be_if_desc if_desc; }; -extern int be_pci_fnum_get(struct be_adapter *adapter); -extern int be_fw_wait_ready(struct be_adapter *adapter); -extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, - bool permanent, u32 if_handle, u32 pmac_id); -extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, - u32 if_id, u32 *pmac_id, u32 domain); -extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, - int pmac_id, u32 domain); -extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, - u32 en_flags, u32 *if_handle, u32 domain); -extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, - u32 domain); -extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); -extern int be_cmd_cq_create(struct be_adapter *adapter, - struct be_queue_info *cq, struct be_queue_info *eq, - bool no_delay, int num_cqe_dma_coalesce); -extern int be_cmd_mccq_create(struct be_adapter *adapter, - struct be_queue_info *mccq, - struct be_queue_info *cq); -extern int be_cmd_txq_create(struct be_adapter *adapter, - struct be_tx_obj *txo); -extern int be_cmd_rxq_create(struct be_adapter *adapter, - struct be_queue_info *rxq, u16 cq_id, - u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); -extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, - int type); -extern int be_cmd_rxq_destroy(struct be_adapter *adapter, - struct be_queue_info *q); -extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, - u8 *link_status, u32 dom); -extern int be_cmd_reset(struct be_adapter *adapter); -extern int be_cmd_get_stats(struct be_adapter *adapter, - struct be_dma_mem *nonemb_cmd); -extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter, - struct be_dma_mem *nonemb_cmd); -extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, - char *fw_on_flash); - -extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd); -extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, - u16 *vtag_array, u32 num, bool untagged, - bool promiscuous); -extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); -extern int be_cmd_set_flow_control(struct be_adapter *adapter, - u32 tx_fc, u32 rx_fc); -extern int be_cmd_get_flow_control(struct be_adapter *adapter, - u32 *tx_fc, u32 *rx_fc); -extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, +int be_pci_fnum_get(struct be_adapter *adapter); +int be_fw_wait_ready(struct be_adapter *adapter); +int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + bool permanent, u32 if_handle, u32 pmac_id); +int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, + 
u32 *pmac_id, u32 domain); +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, + u32 domain); +int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, + u32 *if_handle, u32 domain); +int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); +int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); +int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, + struct be_queue_info *eq, bool no_delay, + int num_cqe_dma_coalesce); +int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq, + struct be_queue_info *cq); +int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo); +int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, + u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); +int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + int type); +int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); +int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, + u8 *link_status, u32 dom); +int be_cmd_reset(struct be_adapter *adapter); +int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); +int lancer_cmd_get_pport_stats(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd); +int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, + char *fw_on_flash); +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); +int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, + u32 num, bool promiscuous); +int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); +int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); +int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); +int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *function_mode, u32 *function_caps, u16 *asic_rev); -extern int be_cmd_reset_function(struct be_adapter *adapter); -extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, - u32 rss_hash_opts, u16 table_size); -extern int be_process_mcc(struct be_adapter *adapter); -extern int be_cmd_set_beacon_state(struct be_adapter *adapter, - u8 port_num, u8 beacon, u8 status, u8 state); -extern int be_cmd_get_beacon_state(struct be_adapter *adapter, - u8 port_num, u32 *state); -extern int be_cmd_write_flashrom(struct be_adapter *adapter, - struct be_dma_mem *cmd, u32 flash_oper, - u32 flash_opcode, u32 buf_size); -extern int lancer_cmd_write_object(struct be_adapter *adapter, - struct be_dma_mem *cmd, - u32 data_size, u32 data_offset, - const char *obj_name, - u32 *data_written, u8 *change_status, - u8 *addn_status); +int be_cmd_reset_function(struct be_adapter *adapter); +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, + u32 rss_hash_opts, u16 table_size); +int be_process_mcc(struct be_adapter *adapter); +int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, + u8 status, u8 state); +int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, + u32 *state); +int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 flash_oper, u32 flash_opcode, u32 buf_size); +int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, + const char *obj_name, u32 *data_written, + u8 *change_status, u8 *addn_status); int lancer_cmd_read_object(struct be_adapter *adapter, struct 
be_dma_mem *cmd, - u32 data_size, u32 data_offset, const char *obj_name, - u32 *data_read, u32 *eof, u8 *addn_status); + u32 data_size, u32 data_offset, const char *obj_name, + u32 *data_read, u32 *eof, u8 *addn_status); int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - int offset); -extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, - struct be_dma_mem *nonemb_cmd); -extern int be_cmd_fw_init(struct be_adapter *adapter); -extern int be_cmd_fw_clean(struct be_adapter *adapter); -extern void be_async_mcc_enable(struct be_adapter *adapter); -extern void be_async_mcc_disable(struct be_adapter *adapter); -extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, - u32 loopback_type, u32 pkt_size, - u32 num_pkts, u64 pattern); -extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, - u32 byte_cnt, struct be_dma_mem *cmd); -extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, - struct be_dma_mem *nonemb_cmd); -extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, - u8 loopback_type, u8 enable); -extern int be_cmd_get_phy_info(struct be_adapter *adapter); -extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); -extern void be_detect_error(struct be_adapter *adapter); -extern int be_cmd_get_die_temperature(struct be_adapter *adapter); -extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); -extern int be_cmd_req_native_mode(struct be_adapter *adapter); -extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); -extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); -extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, - u32 *privilege, u32 domain); -extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, - u32 privileges, u32 vf_num); -extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_active, u32 *pmac_id, - u8 domain); -extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, - u8 *mac); -extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); -extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, - u8 mac_count, u32 domain); -extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, - u32 dom); -extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, - u32 domain, u16 intf_id, u16 hsw_mode); -extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, - u32 domain, u16 intf_id, u8 *mode); -extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); -extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, - struct be_dma_mem *cmd); -extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, - struct be_dma_mem *cmd, - struct be_fat_conf_params *cfgs); -extern int lancer_wait_ready(struct be_adapter *adapter); -extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); -extern int lancer_initiate_dump(struct be_adapter *adapter); -extern bool dump_present(struct be_adapter *adapter); -extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); -extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); + int offset); +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, + struct be_dma_mem *nonemb_cmd); +int be_cmd_fw_init(struct be_adapter *adapter); +int be_cmd_fw_clean(struct be_adapter *adapter); +void be_async_mcc_enable(struct be_adapter *adapter); +void be_async_mcc_disable(struct 
be_adapter *adapter); +int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, + u32 loopback_type, u32 pkt_size, u32 num_pkts, + u64 pattern); +int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt, + struct be_dma_mem *cmd); +int be_cmd_get_seeprom_data(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd); +int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, + u8 loopback_type, u8 enable); +int be_cmd_get_phy_info(struct be_adapter *adapter); +int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); +void be_detect_error(struct be_adapter *adapter); +int be_cmd_get_die_temperature(struct be_adapter *adapter); +int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +int be_cmd_req_native_mode(struct be_adapter *adapter); +int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); +void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, + u32 domain); +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, + u32 vf_num); +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, + bool *pmac_id_active, u32 *pmac_id, u8 domain); +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac); +int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); +int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, + u32 domain); +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom); +int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, + u16 intf_id, u16 hsw_mode); +int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, + u16 intf_id, u8 *mode); +int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd); +int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd, + struct be_fat_conf_params *cfgs); +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); +int lancer_initiate_dump(struct be_adapter *adapter); +bool dump_present(struct be_adapter *adapter); +int lancer_test_and_set_rdy_state(struct be_adapter *adapter); +int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 domain); -extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, - u8 domain); -extern int be_cmd_get_if_id(struct be_adapter *adapter, - struct be_vf_cfg *vf_cfg, int vf_num); -extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); -extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); +int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); +int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, + int vf_num); +int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b440a1fac77..08330034d9e 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(rx_drops_mtu)}, /* Number of packets dropped due to 
random early drop function */ {DRVSTAT_INFO(eth_red_drops)}, - {DRVSTAT_INFO(be_on_die_temperature)} + {DRVSTAT_INFO(be_on_die_temperature)}, + {DRVSTAT_INFO(rx_roce_bytes_lsd)}, + {DRVSTAT_INFO(rx_roce_bytes_msd)}, + {DRVSTAT_INFO(rx_roce_frames)}, + {DRVSTAT_INFO(roce_drops_payload_len)}, + {DRVSTAT_INFO(roce_drops_crc)} }; #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) @@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = { /* Number of times the TX queue was stopped due to lack * of spaces in the TXQ. */ - {DRVSTAT_TX_INFO(tx_stops)} + {DRVSTAT_TX_INFO(tx_stops)}, + /* Pkts dropped in the driver's transmit path */ + {DRVSTAT_TX_INFO(tx_drv_drops)} }; #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) @@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); - struct be_eq_obj *eqo = &adapter->eq_obj[0]; + struct be_aic_obj *aic = &adapter->aic_obj[0]; - et->rx_coalesce_usecs = eqo->cur_eqd; - et->rx_coalesce_usecs_high = eqo->max_eqd; - et->rx_coalesce_usecs_low = eqo->min_eqd; + et->rx_coalesce_usecs = aic->prev_eqd; + et->rx_coalesce_usecs_high = aic->max_eqd; + et->rx_coalesce_usecs_low = aic->min_eqd; - et->tx_coalesce_usecs = eqo->cur_eqd; - et->tx_coalesce_usecs_high = eqo->max_eqd; - et->tx_coalesce_usecs_low = eqo->min_eqd; + et->tx_coalesce_usecs = aic->prev_eqd; + et->tx_coalesce_usecs_high = aic->max_eqd; + et->tx_coalesce_usecs_low = aic->min_eqd; - et->use_adaptive_rx_coalesce = eqo->enable_aic; - et->use_adaptive_tx_coalesce = eqo->enable_aic; + et->use_adaptive_rx_coalesce = aic->enable; + et->use_adaptive_tx_coalesce = aic->enable; return 0; } @@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); + struct be_aic_obj *aic = &adapter->aic_obj[0]; struct be_eq_obj *eqo; int i; for_all_evt_queues(adapter, eqo, i) { - eqo->enable_aic = et->use_adaptive_rx_coalesce; - eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); - eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); - eqo->eqd = et->rx_coalesce_usecs; + aic->enable = et->use_adaptive_rx_coalesce; + aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); + aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd); + aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd); + aic->et_eqd = max(aic->et_eqd, aic->min_eqd); + aic++; } return 0; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e216212160..dc88782185f 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -64,6 +64,9 @@ #define SLIPORT_ERROR_NO_RESOURCE1 0x2 #define SLIPORT_ERROR_NO_RESOURCE2 0x9 +#define SLIPORT_ERROR_FW_RESET1 0x2 +#define SLIPORT_ERROR_FW_RESET2 0x0 + /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 2c38cc40211..0fde69d5cb6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -22,6 +22,7 @@ #include <asm/div64.h> #include <linux/aer.h> #include <linux/if_bridge.h> +#include <net/busy_poll.h> MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -306,10 +307,14 @@ static void 
*hw_stats_from_cmd(struct be_adapter *adapter) struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va; return &cmd->hw_stats; - } else { + } else if (BE3_chip(adapter)) { struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va; return &cmd->hw_stats; + } else { + struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va; + + return &cmd->hw_stats; } } @@ -320,10 +325,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter) struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); return &hw_stats->erx; - } else { + } else if (BE3_chip(adapter)) { struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); return &hw_stats->erx; + } else { + struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); + + return &hw_stats->erx; } } @@ -422,6 +431,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter) adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; } +static void populate_be_v2_stats(struct be_adapter *adapter) +{ + struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); + struct be_pmem_stats *pmem_sts = &hw_stats->pmem; + struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf; + struct be_port_rxf_stats_v2 *port_stats = + &rxf_stats->port[adapter->port_num]; + struct be_drv_stats *drvs = &adapter->drv_stats; + + be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); + drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; + drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = + port_stats->rx_input_fifo_overflow_drop; + drvs->rx_address_filtered = port_stats->rx_address_filtered; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; + drvs->jabber_events = port_stats->jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; + if (be_roce_supported(adapter)) { + drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; + drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; + drvs->rx_roce_frames = port_stats->roce_frames_received; + 
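/*
 * The rx_roce_bytes_lsd/_msd counters captured here are, going by the
 * usual lsd/msd (least/most significant dword) naming, the two 32-bit
 * halves of a single 64-bit RoCE byte count that the FW reports as
 * separate u32 fields. A minimal sketch of recombining them (the
 * helper below is hypothetical, not part of this patch):
 */
static inline u64 roce_rx_bytes(const struct be_drv_stats *drvs)
{
	/* msd supplies bits 63:32, lsd supplies bits 31:0 */
	return ((u64)drvs->rx_roce_bytes_msd << 32) | drvs->rx_roce_bytes_lsd;
}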
drvs->roce_drops_crc = port_stats->roce_drops_crc; + drvs->roce_drops_payload_len = + port_stats->roce_drops_payload_len; + } +} + static void populate_lancer_stats(struct be_adapter *adapter) { @@ -489,7 +552,7 @@ static void populate_erx_stats(struct be_adapter *adapter, void be_parse_stats(struct be_adapter *adapter) { - struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter); + struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter); struct be_rx_obj *rxo; int i; u32 erx_stat; @@ -499,11 +562,13 @@ void be_parse_stats(struct be_adapter *adapter) } else { if (BE2_chip(adapter)) populate_be_v0_stats(adapter); - else - /* for BE3 and Skyhawk */ + else if (BE3_chip(adapter)) + /* for BE3 */ populate_be_v1_stats(adapter); + else + populate_be_v2_stats(adapter); - /* as erx_v1 is longer than v0, ok to use v1 for v0 access */ + /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */ for_all_rx_queues(adapter, rxo, i) { erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; populate_erx_stats(adapter, rxo, erx_stat); @@ -935,8 +1000,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) u32 start = txq->head; skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); - if (!skb) + if (!skb) { + tx_stats(txo)->tx_drv_drops++; return NETDEV_TX_OK; + } wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); @@ -965,6 +1032,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); } else { txq->head = start; + tx_stats(txo)->tx_drv_drops++; dev_kfree_skb_any(skb); } return NETDEV_TX_OK; @@ -1011,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter) vids[num++] = cpu_to_le16(i); status = be_cmd_vlan_config(adapter, adapter->if_handle, - vids, num, 1, 0); + vids, num, 0); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -1275,53 +1343,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev, return status; } -static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) +static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, + ulong now) +{ + aic->rx_pkts_prev = rx_pkts; + aic->tx_reqs_prev = tx_pkts; + aic->jiffies = now; +} + +static void be_eqd_update(struct be_adapter *adapter) { - struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); - ulong now = jiffies; - ulong delta = now - stats->rx_jiffies; - u64 pkts; - unsigned int start, eqd; + struct be_set_eqd set_eqd[MAX_EVT_QS]; + int eqd, i, num = 0, start; + struct be_aic_obj *aic; + struct be_eq_obj *eqo; + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + u64 rx_pkts, tx_pkts; + ulong now; + u32 pps, delta; - if (!eqo->enable_aic) { - eqd = eqo->eqd; - goto modify_eqd; - } + for_all_evt_queues(adapter, eqo, i) { + aic = &adapter->aic_obj[eqo->idx]; + if (!aic->enable) { + if (aic->jiffies) + aic->jiffies = 0; + eqd = aic->et_eqd; + goto modify_eqd; + } - if (eqo->idx >= adapter->num_rx_qs) - return; + rxo = &adapter->rx_obj[eqo->idx]; + do { + start = u64_stats_fetch_begin_bh(&rxo->stats.sync); + rx_pkts = rxo->stats.rx_pkts; + } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start)); - stats = rx_stats(&adapter->rx_obj[eqo->idx]); + txo = &adapter->tx_obj[eqo->idx]; + do { + start = u64_stats_fetch_begin_bh(&txo->stats.sync); + tx_pkts = txo->stats.tx_reqs; + } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start)); - /* Wrapped around */ - if (time_before(now, stats->rx_jiffies)) { - stats->rx_jiffies = now; - return; - } - /* Update 
once a second */ - if (delta < HZ) - return; + /* Skip, if wrapped around or first calculation */ + now = jiffies; + if (!aic->jiffies || time_before(now, aic->jiffies) || + rx_pkts < aic->rx_pkts_prev || + tx_pkts < aic->tx_reqs_prev) { + be_aic_update(aic, rx_pkts, tx_pkts, now); + continue; + } - do { - start = u64_stats_fetch_begin_bh(&stats->sync); - pkts = stats->rx_pkts; - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); - - stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ); - stats->rx_pkts_prev = pkts; - stats->rx_jiffies = now; - eqd = (stats->rx_pps / 110000) << 3; - eqd = min(eqd, eqo->max_eqd); - eqd = max(eqd, eqo->min_eqd); - if (eqd < 10) - eqd = 0; + delta = jiffies_to_msecs(now - aic->jiffies); + pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) + + (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta); + eqd = (pps / 15000) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, aic->max_eqd); + eqd = max_t(u32, eqd, aic->min_eqd); + be_aic_update(aic, rx_pkts, tx_pkts, now); modify_eqd: - if (eqd != eqo->cur_eqd) { - be_cmd_modify_eqd(adapter, eqo->q.id, eqd); - eqo->cur_eqd = eqd; + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = eqo->q.id; + aic->prev_eqd = eqd; + num++; + } } + + if (num) + be_cmd_modify_eqd(adapter, set_eqd, num); } static void be_rx_stats_update(struct be_rx_obj *rxo, @@ -1463,7 +1557,7 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, } /* Process the RX completion indicated by rxcp when GRO is disabled */ -static void be_rx_compl_process(struct be_rx_obj *rxo, +static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, struct be_rx_compl_info *rxcp) { struct be_adapter *adapter = rxo->adapter; @@ -1488,7 +1582,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (netdev->features & NETIF_F_RXHASH) skb->rxhash = rxcp->rss_hash; - + skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -1546,6 +1640,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (adapter->netdev->features & NETIF_F_RXHASH) skb->rxhash = rxcp->rss_hash; + skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -1726,6 +1821,8 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) if (posted) { atomic_add(posted, &rxq->used); + if (rxo->rx_post_starved) + rxo->rx_post_starved = false; be_rxq_notify(adapter, rxq->id, posted); } else if (atomic_read(&rxq->used) == 0) { /* Let be_worker replenish when memory is available */ @@ -1928,6 +2025,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) if (eqo->q.created) { be_eq_clean(eqo); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); + napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); } be_queue_free(adapter, &eqo->q); @@ -1938,6 +2036,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) { struct be_queue_info *eq; struct be_eq_obj *eqo; + struct be_aic_obj *aic; int i, rc; adapter->num_evt_qs = min_t(u16, num_irqs(adapter), @@ -1946,11 +2045,13 @@ static int be_evt_queues_create(struct be_adapter *adapter) for_all_evt_queues(adapter, eqo, i) { netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->tx_budget 
= BE_TX_BUDGET; eqo->idx = i; - eqo->max_eqd = BE_MAX_EQD; - eqo->enable_aic = true; + aic->max_eqd = BE_MAX_EQD; + aic->enable = true; eq = &eqo->q; rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, @@ -2047,6 +2148,9 @@ static int be_tx_qs_create(struct be_adapter *adapter) if (status) return status; + u64_stats_init(&txo->stats.sync); + u64_stats_init(&txo->stats.sync_compl); + /* If num_evt_qs is less than num_tx_qs, then more than * one txq share an eq */ @@ -2108,6 +2212,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (rc) return rc; + u64_stats_init(&rxo->stats.sync); eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; rc = be_cmd_cq_create(adapter, cq, eq, false, 3); if (rc) @@ -2167,7 +2272,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp) } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, - int budget) + int budget, int polling) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; @@ -2198,10 +2303,12 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, goto loop_continue; } - if (do_gro(rxcp)) + /* Don't do gro when we're busy_polling */ + if (do_gro(rxcp) && polling != BUSY_POLLING) be_rx_compl_process_gro(rxo, napi, rxcp); else - be_rx_compl_process(rxo, rxcp); + be_rx_compl_process(rxo, napi, rxcp); + loop_continue: be_rx_stats_update(rxo, rxcp); } @@ -2209,7 +2316,11 @@ loop_continue: if (work_done) { be_cq_notify(adapter, rx_cq->id, true, work_done); - if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) + /* When an rx-obj gets into post_starved state, just + * let be_worker do the posting. + */ + if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && + !rxo->rx_post_starved) be_post_rx_frags(rxo, GFP_ATOMIC); } @@ -2254,6 +2365,7 @@ int be_poll(struct napi_struct *napi, int budget) struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); struct be_adapter *adapter = eqo->adapter; int max_work = 0, work, i, num_evts; + struct be_rx_obj *rxo; bool tx_done; num_evts = events_get(eqo); @@ -2266,13 +2378,18 @@ int be_poll(struct napi_struct *napi, int budget) max_work = budget; } - /* This loop will iterate twice for EQ0 in which - * completions of the last RXQ (default one) are also processed - * For other EQs the loop iterates only once - */ - for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) { - work = be_process_rx(&adapter->rx_obj[i], napi, budget); - max_work = max(work, max_work); + if (be_lock_napi(eqo)) { + /* This loop will iterate twice for EQ0 in which + * completions of the last RXQ (default one) are also processed + * For other EQs the loop iterates only once + */ + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, budget, NAPI_POLLING); + max_work = max(work, max_work); + } + be_unlock_napi(eqo); + } else { + max_work = budget; } if (is_mcc_eqo(eqo)) @@ -2288,6 +2405,28 @@ int be_poll(struct napi_struct *napi, int budget) return max_work; } +#ifdef CONFIG_NET_RX_BUSY_POLL +static int be_busy_poll(struct napi_struct *napi) +{ + struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = eqo->adapter; + struct be_rx_obj *rxo; + int i, work = 0; + + if (!be_lock_busy_poll(eqo)) + return LL_FLUSH_BUSY; + + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, 4, BUSY_POLLING); + if (work) + break; + } + + be_unlock_busy_poll(eqo); + return work; +} +#endif + void be_detect_error(struct be_adapter *adapter) { u32 ue_lo = 0, ue_hi = 0, 
ue_lo_mask = 0, ue_hi_mask = 0; @@ -2325,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter) */ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->hw_error = true; - dev_err(&adapter->pdev->dev, - "Error detected in the card\n"); + /* Do not log error messages if its a FW reset */ + if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && + sliport_err2 == SLIPORT_ERROR_FW_RESET2) { + dev_info(&adapter->pdev->dev, + "Firmware update in progress\n"); + return; + } else { + dev_err(&adapter->pdev->dev, + "Error detected in the card\n"); + } } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { @@ -2520,8 +2667,10 @@ static int be_close(struct net_device *netdev) be_roce_dev_close(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { - for_all_evt_queues(adapter, eqo, i) + for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); + be_disable_busy_poll(eqo); + } adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; } @@ -2535,6 +2684,11 @@ static int be_close(struct net_device *netdev) be_rx_qs_destroy(adapter); + for (i = 1; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) synchronize_irq(be_msix_vec_get(adapter, eqo)); @@ -2632,6 +2786,7 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); + be_enable_busy_poll(eqo); be_eq_notify(adapter, eqo->q.id, true, false, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -2785,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter) } } -static int be_clear(struct be_adapter *adapter) +static void be_mac_clear(struct be_adapter *adapter) { int i; + if (adapter->pmac_id) { + for (i = 0; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + } +} + +static int be_clear(struct be_adapter *adapter) +{ be_cancel_worker(adapter); if (sriov_enabled(adapter)) be_vf_clear(adapter); /* delete the primary mac along with the uc-mac list */ - for (i = 0; i < (adapter->uc_macs + 1); i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); - adapter->uc_macs = 0; + be_mac_clear(adapter); be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_clear_queues(adapter); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - be_msix_disable(adapter); return 0; } @@ -2937,7 +3099,8 @@ static int be_vf_setup(struct be_adapter *adapter) goto err; vf_cfg->def_vid = def_vlan; - be_cmd_enable_vf(adapter, vf + 1); + if (!old_vfs) + be_cmd_enable_vf(adapter, vf + 1); } if (!old_vfs) { @@ -2962,12 +3125,12 @@ static void BEx_get_resources(struct be_adapter *adapter, struct pci_dev *pdev = adapter->pdev; bool use_sriov = false; - if (BE3_chip(adapter) && be_physfn(adapter)) { + if (BE3_chip(adapter) && sriov_want(adapter)) { int max_vfs; max_vfs = pci_sriov_get_totalvfs(pdev); res->max_vfs = max_vfs > 0 ? 
min(MAX_VFS, max_vfs) : 0; - use_sriov = res->max_vfs && num_vfs; + use_sriov = res->max_vfs; } if (be_physfn(adapter)) @@ -2983,8 +3146,9 @@ static void BEx_get_resources(struct be_adapter *adapter, res->max_vlans = BE_NUM_VLANS_SUPPORTED; res->max_mcast_mac = BE_MAX_MC; + /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */ if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || - !be_physfn(adapter)) + !be_physfn(adapter) || (adapter->port_num > 1)) res->max_tx_qs = 1; else res->max_tx_qs = BE3_MAX_TX_QS; @@ -3026,14 +3190,6 @@ static int be_get_resources(struct be_adapter *adapter) adapter->res = res; } - /* For BE3 only check if FW suggests a different max-txqs value */ - if (BE3_chip(adapter)) { - status = be_cmd_get_profile_config(adapter, &res, 0); - if (!status && res.max_tx_qs) - adapter->res.max_tx_qs = - min(adapter->res.max_tx_qs, res.max_tx_qs); - } - /* For Lancer, SH etc read per-function resource limits from FW. * GET_FUNC_CONFIG returns per function guaranteed limits. * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits @@ -3112,12 +3268,10 @@ static int be_mac_setup(struct be_adapter *adapter) memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* On BE3 VFs this cmd may fail due to lack of privilege. - * Ignore the failure as in this case pmac_id is fetched - * in the IFACE_CREATE cmd. - */ - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); + /* For BE3-R VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) + be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); return 0; } @@ -3247,6 +3401,12 @@ static int be_setup(struct be_adapter *adapter) be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash); + if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) { + dev_err(dev, "Firmware on card is old(%s), IRQs may not work.", + adapter->fw_ver); + dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); + } + if (adapter->vlans_added) be_vid_config(adapter); @@ -3258,7 +3418,7 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (be_physfn(adapter) && num_vfs) { + if (sriov_want(adapter)) { if (be_max_vfs(adapter)) be_vf_setup(adapter); else @@ -3667,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter, } if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, + "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { @@ -3900,6 +4062,9 @@ static const struct net_device_ops be_netdev_ops = { #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = be_busy_poll +#endif }; static void be_netdev_init(struct net_device *netdev) @@ -3960,11 +4125,6 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { u8 __iomem *addr; - u32 sli_intf; - - pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); - adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> - SLI_INTF_IF_TYPE_SHIFT; if (BEx_chip(adapter) && be_physfn(adapter)) { adapter->csr = pci_iomap(adapter->pdev, 2, 0); @@ -4077,9 +4237,11 @@ static int be_stats_init(struct be_adapter *adapter) cmd->size = sizeof(struct lancer_cmd_req_pport_stats); else if (BE2_chip(adapter)) cmd->size = sizeof(struct be_cmd_req_get_stats_v0); - else - /* 
BE3 and Skyhawk */ + else if (BE3_chip(adapter)) cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + else + /* ALL non-BE ASICs */ + cmd->size = sizeof(struct be_cmd_req_get_stats_v2); cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, GFP_KERNEL); @@ -4113,7 +4275,6 @@ static void be_remove(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); - pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); @@ -4219,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter) goto err; } - dev_err(dev, "Error recovery successful\n"); + dev_err(dev, "Adapter recovery successful\n"); return 0; err: if (status == -EAGAIN) dev_err(dev, "Waiting for resource provisioning\n"); else - dev_err(dev, "Error recovery failed\n"); + dev_err(dev, "Adapter recovery failed\n"); return status; } @@ -4262,7 +4423,6 @@ static void be_worker(struct work_struct *work) struct be_adapter *adapter = container_of(work, struct be_adapter, work.work); struct be_rx_obj *rxo; - struct be_eq_obj *eqo; int i; /* when interrupts are not yet enabled, just reap any pending @@ -4287,14 +4447,14 @@ static void be_worker(struct work_struct *work) be_cmd_get_die_temperature(adapter); for_all_rx_queues(adapter, rxo, i) { - if (rxo->rx_post_starved) { - rxo->rx_post_starved = false; + /* Replenish RX-queues starved due to memory + * allocation failures. + */ + if (rxo->rx_post_starved) be_post_rx_frags(rxo, GFP_KERNEL); - } } - for_all_evt_queues(adapter, eqo, i) - be_eqd_update(adapter, eqo); + be_eqd_update(adapter); reschedule: adapter->work_counter++; @@ -4351,28 +4511,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) adapter->netdev = netdev; SET_NETDEV_DEV(netdev, &pdev->dev); - status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!status) { - status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (status < 0) { - dev_err(&pdev->dev, "dma_set_coherent_mask failed\n"); - goto free_netdev; - } netdev->features |= NETIF_F_HIGHDMA; } else { - status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (!status) - status = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; } } - status = pci_enable_pcie_error_reporting(pdev); - if (status) - dev_info(&pdev->dev, "Could not use PCIe error reporting\n"); + if (be_physfn(adapter)) { + status = pci_enable_pcie_error_reporting(pdev); + if (!status) + dev_info(&pdev->dev, "PCIe error reporting enabled\n"); + } status = be_ctrl_init(adapter); if (status) @@ -4443,7 +4597,6 @@ ctrl_clean: be_ctrl_cleanup(adapter); free_netdev: free_netdev(netdev); - pci_set_drvdata(pdev, NULL); rel_reg: pci_release_regions(pdev); disable_dev: @@ -4461,6 +4614,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) if (adapter->wol) be_setup_wol(adapter, true); + be_intr_set(adapter, false); cancel_delayed_work_sync(&adapter->func_recovery_work); netif_device_detach(netdev); @@ -4496,6 +4650,7 @@ static int be_resume(struct pci_dev *pdev) if (status) return status; + be_intr_set(adapter, true); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index c706b7a9397..4b22a9579f8 100644 --- a/drivers/net/ethernet/fealnx.c +++ 
b/drivers/net/ethernet/fealnx.c @@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, np->mem); free_netdev(dev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); } else printk(KERN_ERR "fealnx: remove for unknown device\n"); } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2793b91cc5..e7c8b749c5a 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev); * detected as not set during a prior frame transmission, then the * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously - * detected as not set during a prior frame transmission, then the - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in * frames not being transmitted until there is a 0-to-1 transition on * ENET_TDAR[TDAR]. */ @@ -385,8 +381,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * data. */ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = 0; + fep->tx_skbuff[index] = NULL; + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ @@ -772,11 +775,10 @@ fec_enet_tx(struct net_device *ndev) else index = bdp - fep->tx_bd_base; - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - skb = fep->tx_skbuff[index]; + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, + DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; /* Check for errors. 
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -861,6 +863,7 @@ fec_enet_rx(struct net_device *ndev, int budget) struct bufdesc_ex *ebdp = NULL; bool vlan_packet_rcvd = false; u16 vlan_tag; + int index = 0; #ifdef CONFIG_M532x flush_cache_all(); @@ -916,10 +919,15 @@ fec_enet_rx(struct net_device *ndev, int budget) ndev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - data = (__u8*)__va(bdp->cbd_bufaddr); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->rx_bd_base; + else + index = bdp - fep->rx_bd_base; + data = fep->rx_skbuff[index]->data; + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -999,8 +1007,8 @@ fec_enet_rx(struct net_device *ndev, int budget) napi_gro_receive(&fep->napi, skb); } - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1719,6 +1727,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + fec_enet_free_buffers(ndev); + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 6b60582ce8c..56f2f608a9f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev) mac_addr = of_get_mac_address(ofdev->dev.of_node); if (mac_addr) - memcpy(ndev->dev_addr, mac_addr, 6); + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); ret = fep->ops->allocate_bd(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 7583a9572bc..f8b92864fc5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -32,7 +32,9 @@ #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/phy.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/gfp.h> #include <asm/immap_cpm2.h> @@ -88,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep) struct fs_platform_info *fpi = fep->fpi; int ret = -EINVAL; - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) goto out; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 9ae6cdbcac2..a9a00f39521 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -31,7 +31,9 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/gfp.h> 
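/*
 * The fs_enet hunks in this series all make the same substitution in
 * do_pd_setup(): of_irq_to_resource() is replaced by
 * irq_of_parse_and_map(), which translates the node's interrupt
 * specifier at the given index directly into a Linux virq. A minimal
 * sketch of the pattern (hypothetical helper, assuming a
 * platform_device *ofdev with a valid of_node):
 */
static int my_pd_setup(struct platform_device *ofdev,
		       struct fs_enet_private *fep)
{
	/* index 0: first interrupt listed in the device-tree node */
	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (fep->interrupt == NO_IRQ)	/* NO_IRQ means the mapping failed */
		return -EINVAL;
	return 0;
}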
#include <asm/irq.h> @@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct platform_device *ofdev = to_platform_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) return -EINVAL; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 22a02a76706..d37cd4ebac6 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -31,6 +31,8 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/irq.h> @@ -98,7 +100,7 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct platform_device *ofdev = to_platform_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (fep->interrupt == NO_IRQ) return -EINVAL; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 844ecfa84d1..67caaacd19e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -22,6 +22,7 @@ #include <linux/mii.h> #include <linux/platform_device.h> #include <linux/mdio-bitbang.h> +#include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 2f1c46a12f0..ac5d447ff8c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -31,6 +31,7 @@ #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/platform_device.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/pgtable.h> diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index c4eaadeb572..b14d7904a07 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -78,6 +78,8 @@ #include <linux/if_vlan.h> #include <linux/spinlock.h> #include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/ip.h> @@ -88,6 +90,7 @@ #include <asm/io.h> #include <asm/reg.h> +#include <asm/mpc85xx.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <linux/module.h> @@ -939,9 +942,8 @@ static void gfar_init_filer_table(struct gfar_private *priv) } } -static void gfar_detect_errata(struct gfar_private *priv) +static void __gfar_detect_errata_83xx(struct gfar_private *priv) { - struct device *dev = &priv->ofdev->dev; unsigned int pvr = mfspr(SPRN_PVR); unsigned int svr = mfspr(SPRN_SVR); unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ @@ -957,15 +959,33 @@ static void gfar_detect_errata(struct gfar_private *priv) (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) priv->errata |= GFAR_ERRATA_76; - /* MPC8313 and MPC837x all rev */ - if ((pvr == 0x80850010 && mod == 0x80b0) || - (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) - priv->errata |= GFAR_ERRATA_A002; + /* MPC8313 Rev < 2.0 */ + if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) + priv->errata |= GFAR_ERRATA_12; +} + +static void __gfar_detect_errata_85xx(struct 
gfar_private *priv) +{ + unsigned int svr = mfspr(SPRN_SVR); - /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ - if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || - (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) + if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) priv->errata |= GFAR_ERRATA_12; + if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ +} + +static void gfar_detect_errata(struct gfar_private *priv) +{ + struct device *dev = &priv->ofdev->dev; + + /* no plans to fix */ + priv->errata |= GFAR_ERRATA_A002; + + if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) + __gfar_detect_errata_85xx(priv); + else /* non-mpc85xx parts, i.e. e300 core based */ + __gfar_detect_errata_83xx(priv); if (priv->errata) dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", @@ -1599,7 +1619,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv) /* Normaly TSEC should not hang on GRS commands, so we should * actually wait for IEVENT_GRSC flag. */ - if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) + if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) return 0; /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are @@ -2900,7 +2920,7 @@ static int gfar_poll(struct napi_struct *napi, int budget) struct gfar_priv_rx_q *rx_queue = NULL; int work_done = 0, work_done_per_q = 0; int i, budget_per_q = 0; - int has_tx_work; + int has_tx_work = 0; unsigned long rstat_rxf; int num_act_queues; @@ -2915,62 +2935,51 @@ static int gfar_poll(struct napi_struct *napi, int budget) if (num_act_queues) budget_per_q = budget/num_act_queues; - while (1) { - has_tx_work = 0; - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { - tx_queue = priv->tx_queue[i]; - /* run Tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; - } + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { + gfar_clean_tx_ring(tx_queue); + has_tx_work = 1; } + } - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - /* skip queue if not active */ - if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) - continue; - - rx_queue = priv->rx_queue[i]; - work_done_per_q = - gfar_clean_rx_ring(rx_queue, budget_per_q); - work_done += work_done_per_q; - - /* finished processing this queue */ - if (work_done_per_q < budget_per_q) { - /* clear active queue hw indication */ - gfar_write(®s->rstat, - RSTAT_CLEAR_RXF0 >> i); - rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i); - num_act_queues--; - - if (!num_act_queues) - break; - /* recompute budget per Rx queue */ - budget_per_q = - (budget - work_done) / num_act_queues; - } - } + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + /* skip queue if not active */ + if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) + continue; - if (work_done >= budget) - break; + rx_queue = priv->rx_queue[i]; + work_done_per_q = + gfar_clean_rx_ring(rx_queue, budget_per_q); + work_done += work_done_per_q; + + /* finished processing this queue */ + if (work_done_per_q < budget_per_q) { + /* clear active queue hw indication */ + gfar_write(®s->rstat, + RSTAT_CLEAR_RXF0 >> i); + num_act_queues--; + + if (!num_act_queues) + break; + } + } - if (!num_act_queues && !has_tx_work) { + if (!num_act_queues && !has_tx_work) { - 
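/*
 * The gfar_poll() rework above flattens the old while(1) loop into a
 * single NAPI pass: Tx rings are cleaned to completion, every active Rx
 * queue receives an equal slice of the budget, and completion plus
 * interrupt re-enable happen only once nothing is left. The general
 * shape of that pattern, as a sketch with hypothetical names rather
 * than gianfar's exact code:
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int i, work_done = 0, budget_per_q;

	budget_per_q = budget / priv->num_rx_queues;
	for (i = 0; i < priv->num_rx_queues; i++)
		work_done += my_clean_rx_ring(&priv->rx_queue[i],
					      budget_per_q);

	if (work_done < budget && !my_tx_pending(priv)) {
		napi_complete(napi);		/* end of this NAPI round */
		my_enable_device_irqs(priv);	/* next IRQ reschedules us */
	}
	return work_done;
}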
napi_complete(napi); + napi_complete(napi); - /* Clear the halt bit in RSTAT */ - gfar_write(®s->rstat, gfargrp->rstat); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); - gfar_write(®s->imask, IMASK_DEFAULT); + gfar_write(®s->imask, IMASK_DEFAULT); - /* If we are coalescing interrupts, update the timer - * Otherwise, clear it - */ - gfar_configure_coalescing(priv, gfargrp->rx_bit_map, - gfargrp->tx_bit_map); - break; - } + /* If we are coalescing interrupts, update the timer + * Otherwise, clear it + */ + gfar_configure_coalescing(priv, gfargrp->rx_bit_map, + gfargrp->tx_bit_map); } return work_done; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 04112b98ff5..114c58f9d8d 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv, *fpr = gfar_read(®s->rqfpr); } -extern void lock_rx_qs(struct gfar_private *priv); -extern void lock_tx_qs(struct gfar_private *priv); -extern void unlock_rx_qs(struct gfar_private *priv); -extern void unlock_tx_qs(struct gfar_private *priv); -extern irqreturn_t gfar_receive(int irq, void *dev_id); -extern int startup_gfar(struct net_device *dev); -extern void stop_gfar(struct net_device *dev); -extern void gfar_halt(struct net_device *dev); -extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, - int enable, u32 regnum, u32 read); -extern void gfar_configure_coalescing_all(struct gfar_private *priv); +void lock_rx_qs(struct gfar_private *priv); +void lock_tx_qs(struct gfar_private *priv); +void unlock_rx_qs(struct gfar_private *priv); +void unlock_tx_qs(struct gfar_private *priv); +irqreturn_t gfar_receive(int irq, void *dev_id); +int startup_gfar(struct net_device *dev); +void stop_gfar(struct net_device *dev); +void gfar_halt(struct net_device *dev); +void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, + u32 regnum, u32 read); +void gfar_configure_coalescing_all(struct gfar_private *priv); void gfar_init_sysfs(struct net_device *dev); int gfar_set_features(struct net_device *dev, netdev_features_t features); -extern void gfar_check_rx_parser_mode(struct gfar_private *priv); -extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); +void gfar_check_rx_parser_mode(struct gfar_private *priv); +void gfar_vlan_mode(struct net_device *dev, netdev_features_t features); extern const struct ethtool_ops gfar_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 5930c39672d..5548b6d00c3 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -31,6 +31,8 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/workqueue.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> @@ -3899,7 +3901,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) mac_addr = of_get_mac_address(np); if (mac_addr) - memcpy(dev->dev_addr, mac_addr, 6); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); ugeth->ug_info = ug_info; ugeth->dev = device; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index c1b6e7e31aa..d449fcb9019 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -17,6 +17,7 @@ 
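/*
 * The ucc_geth hunk above and the i825xx/ibmveth hunks below replace
 * the magic length 6 in MAC-address copies with ETH_ALEN, which
 * <linux/if_ether.h> defines as 6. Same object code, clearer intent;
 * a one-line sketch (copy_mac() is a hypothetical wrapper):
 */
static inline void copy_mac(u8 *dst, const u8 *src)
{
	memcpy(dst, src, ETH_ALEN);	/* always 6 octets for Ethernet */
}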
#include <linux/module.h> #include <linux/phy.h> #include <linux/mdio.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig index 6231bc02b96..1085257385d 100644 --- a/drivers/net/ethernet/fujitsu/Kconfig +++ b/drivers/net/ethernet/fujitsu/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_FUJITSU bool "Fujitsu devices" default y - depends on ISA || PCMCIA + depends on PCMCIA ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 91227d03274..37860096f74 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev) if (request_irq(dev->irq, hp100_interrupt, lp->bus == HP100_BUS_PCI || lp->bus == HP100_BUS_EISA ? IRQF_SHARED : 0, - "hp100", dev)) { + dev->name, dev)) { printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); return -EAGAIN; } diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index e3881614539..7ce6379fd1a 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev) i596_add_cmd(dev, &lp->cf_cmd.cmd); DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); - memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6); + memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); lp->sa_cmd.cmd.command = CmdSASetup; i596_add_cmd(dev, &lp->sa_cmd.cmd); @@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit) err = -ENODEV; goto out; } - memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */ + memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! 
Get addr from NOVRAM */ dev->base_addr = MVME_I596_BASE; dev->irq = (unsigned) MVME16x_IRQ_I596; goto found; @@ -1527,9 +1527,7 @@ int __init init_module(void) if (debug >= 0) i596_debug = debug; dev_82596 = i82596_probe(-1); - if (IS_ERR(dev_82596)) - return PTR_ERR(dev_82596); - return 0; + return PTR_ERR_OR_ZERO(dev_82596); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index d653bac4cfc..861fa15e1e8 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev) i596_add_cmd(dev, &dma->cf_cmd.cmd); DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); - memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6); + memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); i596_add_cmd(dev, &dma->sa_cmd.cmd); @@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev) netdev_for_each_mc_addr(ha, dev) { if (!cnt--) break; - memcpy(cp, ha->addr, 6); + memcpy(cp, ha->addr, ETH_ALEN); if (i596_debug > 1) DEB(DEB_MULTI, printk(KERN_DEBUG "%s: Adding address %pM\n", dev->name, cp)); - cp += 6; + cp += ETH_ALEN; } DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); i596_add_cmd(dev, &cmd->cmd); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 2d1c6bdd361..7628e0fd845 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, dev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | + dev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 6b5c7222342..ae342fdb42c 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -39,6 +39,8 @@ #include <linux/bitops.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_net.h> #include <linux/slab.h> @@ -2676,7 +2678,7 @@ static int emac_init_config(struct emac_instance *dev) np->full_name); return -ENXIO; } - memcpy(dev->ndev->dev_addr, p, 6); + memcpy(dev->ndev->dev_addr, p, ETH_ALEN); /* IAHT and GAHT filter parameterization */ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h index 59a92d5870b..9c45efe4c8f 100644 --- a/drivers/net/ethernet/ibm/emac/debug.h +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -29,13 +29,13 @@ struct emac_instance; struct mal_instance; -extern void emac_dbg_register(struct emac_instance *dev); -extern void emac_dbg_unregister(struct emac_instance *dev); -extern void mal_dbg_register(struct mal_instance *mal); -extern void mal_dbg_unregister(struct mal_instance *mal); -extern int emac_init_debug(void) __init; -extern void emac_fini_debug(void) __exit; -extern void emac_dbg_dump_all(void); +void emac_dbg_register(struct emac_instance *dev); +void emac_dbg_unregister(struct emac_instance *dev); +void 
mal_dbg_register(struct mal_instance *mal); +void mal_dbg_unregister(struct mal_instance *mal); +int emac_init_debug(void) __init; +void emac_fini_debug(void) __exit; +void emac_dbg_dump_all(void); # define DBG_LEVEL 1 diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index dac564c2544..9d75fef6396 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -27,6 +27,7 @@ #include <linux/delay.h> #include <linux/slab.h> +#include <linux/of_irq.h> #include "core.h" #include <asm/dcr-regs.h> @@ -263,7 +264,9 @@ static inline void mal_schedule_poll(struct mal_instance *mal) { if (likely(napi_schedule_prep(&mal->napi))) { MAL_DBG2(mal, "schedule_poll" NL); + spin_lock(&mal->lock); mal_disable_eob_irq(mal); + spin_unlock(&mal->lock); __napi_schedule(&mal->napi); } else MAL_DBG2(mal, "already in poll" NL); @@ -442,15 +445,13 @@ static int mal_poll(struct napi_struct *napi, int budget) if (unlikely(mc->ops->peek_rx(mc->dev) || test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { MAL_DBG2(mal, "rotting packet" NL); - if (napi_reschedule(napi)) - mal_disable_eob_irq(mal); - else - MAL_DBG2(mal, "already in poll list" NL); - - if (budget > 0) - goto again; - else + if (!napi_reschedule(napi)) goto more_work; + + spin_lock_irqsave(&mal->lock, flags); + mal_disable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + goto again; } mc->ops->poll_tx(mc->dev); } diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index c47e23d6eea..4fb2f96da23 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/ethtool.h> +#include <linux/of_address.h> #include <asm/io.h> #include "emac.h" diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h index 668bceeff4a..d4f1374d190 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.h +++ b/drivers/net/ethernet/ibm/emac/rgmii.h @@ -56,15 +56,15 @@ struct rgmii_instance { #ifdef CONFIG_IBM_EMAC_RGMII -extern int rgmii_init(void); -extern void rgmii_exit(void); -extern int rgmii_attach(struct platform_device *ofdev, int input, int mode); -extern void rgmii_detach(struct platform_device *ofdev, int input); -extern void rgmii_get_mdio(struct platform_device *ofdev, int input); -extern void rgmii_put_mdio(struct platform_device *ofdev, int input); -extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed); -extern int rgmii_get_regs_len(struct platform_device *ofdev); -extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf); +int rgmii_init(void); +void rgmii_exit(void); +int rgmii_attach(struct platform_device *ofdev, int input, int mode); +void rgmii_detach(struct platform_device *ofdev, int input); +void rgmii_get_mdio(struct platform_device *ofdev, int input); +void rgmii_put_mdio(struct platform_device *ofdev, int input); +void rgmii_set_speed(struct platform_device *ofdev, int input, int speed); +int rgmii_get_regs_len(struct platform_device *ofdev); +void *rgmii_dump_regs(struct platform_device *ofdev, void *buf); #else diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index c231a4a32c4..9f24769ed82 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -18,6 +18,7 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. 
*/ +#include <linux/of_address.h> #include <asm/io.h> #include "emac.h" diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h index 350b7096a04..4d5f336f07b 100644 --- a/drivers/net/ethernet/ibm/emac/tah.h +++ b/drivers/net/ethernet/ibm/emac/tah.h @@ -72,13 +72,13 @@ struct tah_instance { #ifdef CONFIG_IBM_EMAC_TAH -extern int tah_init(void); -extern void tah_exit(void); -extern int tah_attach(struct platform_device *ofdev, int channel); -extern void tah_detach(struct platform_device *ofdev, int channel); -extern void tah_reset(struct platform_device *ofdev); -extern int tah_get_regs_len(struct platform_device *ofdev); -extern void *tah_dump_regs(struct platform_device *ofdev, void *buf); +int tah_init(void); +void tah_exit(void); +int tah_attach(struct platform_device *ofdev, int channel); +void tah_detach(struct platform_device *ofdev, int channel); +void tah_reset(struct platform_device *ofdev); +int tah_get_regs_len(struct platform_device *ofdev); +void *tah_dump_regs(struct platform_device *ofdev, void *buf); #else diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 4cdf286f7ee..9ca67a38c06 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/ethtool.h> +#include <linux/of_address.h> #include <asm/io.h> #include "emac.h" diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h index 455bfb08549..0959c55b145 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.h +++ b/drivers/net/ethernet/ibm/emac/zmii.h @@ -53,15 +53,15 @@ struct zmii_instance { #ifdef CONFIG_IBM_EMAC_ZMII -extern int zmii_init(void); -extern void zmii_exit(void); -extern int zmii_attach(struct platform_device *ofdev, int input, int *mode); -extern void zmii_detach(struct platform_device *ofdev, int input); -extern void zmii_get_mdio(struct platform_device *ofdev, int input); -extern void zmii_put_mdio(struct platform_device *ofdev, int input); -extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed); -extern int zmii_get_regs_len(struct platform_device *ocpdev); -extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf); +int zmii_init(void); +void zmii_exit(void); +int zmii_attach(struct platform_device *ofdev, int input, int *mode); +void zmii_detach(struct platform_device *ofdev, int input); +void zmii_get_mdio(struct platform_device *ofdev, int input); +void zmii_put_mdio(struct platform_device *ofdev, int input); +void zmii_set_speed(struct platform_device *ofdev, int input, int speed); +int zmii_get_regs_len(struct platform_device *ocpdev); +void *zmii_dump_regs(struct platform_device *ofdev, void *buf); #else # define zmii_init() 0 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 5d41aee69d1..952d795230a 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) netdev_for_each_mc_addr(ha, netdev) { /* add the multicast address to the filter table */ unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, ha->addr, 6); + memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netif_napi_add(netdev, 
&adapter->napi, ibmveth_poll, 16); adapter->mac_addr = 0; - memcpy(&adapter->mac_addr, mac_addr_p, 6); + memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN); netdev->irq = dev->irq; netdev->netdev_ops = &ibmveth_netdev_ops; diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index bdf5023724e..25045ae0717 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev) free_netdev(dev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static const struct net_device_ops ipg_netdev_ops = { diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ada6e210279..cbaba4442d4 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2985,7 +2985,6 @@ err_out_free_res: err_out_disable_pdev: pci_disable_device(pdev); err_out_free_dev: - pci_set_drvdata(pdev, NULL); free_netdev(netdev); return err; } @@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev) free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 26d9cd59ec7..f9313b36c88 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -83,6 +83,11 @@ struct e1000_adapter; #define E1000_MAX_INTR 10 +/* + * Count for polling __E1000_RESET condition every 10-20msec. + */ +#define E1000_CHECK_RESET_COUNT 50 + /* TX/RX descriptor defines */ #define E1000_DEFAULT_TXD 256 #define E1000_MAX_TXD 256 @@ -312,8 +317,6 @@ struct e1000_adapter { struct delayed_work watchdog_task; struct delayed_work fifo_stall_task; struct delayed_work phy_info_task; - - struct mutex mutex; }; enum e1000_state_t { @@ -325,7 +328,7 @@ enum e1000_state_t { #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); #define e_dbg(format, arg...) \ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) #define e_err(msglvl, format, arg...) 
\ @@ -346,20 +349,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); extern char e1000_driver_name[]; extern const char e1000_driver_version[]; -extern int e1000_up(struct e1000_adapter *adapter); -extern void e1000_down(struct e1000_adapter *adapter); -extern void e1000_reinit_locked(struct e1000_adapter *adapter); -extern void e1000_reset(struct e1000_adapter *adapter); -extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); -extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); -extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); -extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); -extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); -extern void e1000_update_stats(struct e1000_adapter *adapter); -extern bool e1000_has_link(struct e1000_adapter *adapter); -extern void e1000_power_up_phy(struct e1000_adapter *); -extern void e1000_set_ethtool_ops(struct net_device *netdev); -extern void e1000_check_options(struct e1000_adapter *adapter); -extern char *e1000_get_hw_dev_name(struct e1000_hw *hw); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); #endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 59ad007dd5a..46e6544ed1b 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - /* Only kill reset task if adapter is not resetting */ - if (!test_bit(__E1000_RESETTING, &adapter->flags)) - cancel_work_sync(&adapter->reset_task); - cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. 
+ */ + cancel_delayed_work_sync(&adapter->phy_info_task); cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); } void e1000_down(struct e1000_adapter *adapter) @@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter) e1000_clean_all_rx_rings(adapter); } -static void e1000_reinit_safe(struct e1000_adapter *adapter) -{ - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - mutex_lock(&adapter->mutex); - e1000_down(adapter); - e1000_up(adapter); - mutex_unlock(&adapter->mutex); - clear_bit(__E1000_RESETTING, &adapter->flags); -} - void e1000_reinit_locked(struct e1000_adapter *adapter) { - /* if rtnl_lock is not held the call path is bogus */ - ASSERT_RTNL(); WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); @@ -1018,19 +1012,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ pci_using_dac = 0; if ((hw->bus_type == e1000_bus_type_pcix) && - !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - /* according to DMA-API-HOWTO, coherent calls will always - * succeed if the set call did - */ - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { pr_err("No usable DMA config, aborting\n"); goto err_dma; } - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); } netdev->netdev_ops = &e1000_netdev_ops; @@ -1321,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) e1000_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); - mutex_init(&adapter->mutex); set_bit(__E1000_DOWN, &adapter->flags); @@ -1445,6 +1433,10 @@ static int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); @@ -2330,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, phy_info_task.work); - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - mutex_lock(&adapter->mutex); + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); - mutex_unlock(&adapter->mutex); } /** @@ -2350,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; u32 tctl; - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - mutex_lock(&adapter->mutex); if (atomic_read(&adapter->tx_fifo_stall)) { if ((er32(TDT) == er32(TDH)) && (er32(TDFT) == er32(TDFH)) && @@ -2373,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) schedule_delayed_work(&adapter->fifo_stall_task, 1); } } - mutex_unlock(&adapter->mutex); } bool e1000_has_link(struct e1000_adapter *adapter) @@ -2427,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work) struct e1000_tx_ring *txdr = adapter->tx_ring; u32 link, tctl; - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - - mutex_lock(&adapter->mutex); link = e1000_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) goto link_up; @@ -2521,7 
+2502,7 @@ link_up: adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); /* exit immediately since reset is imminent */ - goto unlock; + return; } } @@ -2549,9 +2530,6 @@ link_up: /* Reschedule the task */ if (!test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); - -unlock: - mutex_unlock(&adapter->mutex); } enum latency_range { @@ -3500,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, reset_task); - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; e_err(drv, "Reset adapter\n"); - e1000_reinit_safe(adapter); + e1000_reinit_locked(adapter); } /** @@ -3917,8 +3893,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, " next_to_watch <%x>\n" " jiffies <%lx>\n" " next_to_watch.status <%x>\n", - (unsigned long)((tx_ring - adapter->tx_ring) / - sizeof(struct e1000_tx_ring)), + (unsigned long)(tx_ring - adapter->tx_ring), readl(hw->hw_addr + tx_ring->tdh), readl(hw->hw_addr + tx_ring->tdt), tx_ring->next_to_use, @@ -4969,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); } diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ad0edd11015..0150f7fc893 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -472,26 +472,25 @@ enum latency_range { extern char e1000e_driver_name[]; extern const char e1000e_driver_version[]; -extern void e1000e_check_options(struct e1000_adapter *adapter); -extern void e1000e_set_ethtool_ops(struct net_device *netdev); - -extern int e1000e_up(struct e1000_adapter *adapter); -extern void e1000e_down(struct e1000_adapter *adapter); -extern void e1000e_reinit_locked(struct e1000_adapter *adapter); -extern void e1000e_reset(struct e1000_adapter *adapter); -extern void e1000e_power_up_phy(struct e1000_adapter *adapter); -extern int e1000e_setup_rx_resources(struct e1000_ring *ring); -extern int e1000e_setup_tx_resources(struct e1000_ring *ring); -extern void e1000e_free_rx_resources(struct e1000_ring *ring); -extern void e1000e_free_tx_resources(struct e1000_ring *ring); -extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 - *stats); -extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); -extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); -extern void e1000e_get_hw_control(struct e1000_adapter *adapter); -extern void e1000e_release_hw_control(struct e1000_adapter *adapter); -extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); +void e1000e_check_options(struct e1000_adapter *adapter); +void e1000e_set_ethtool_ops(struct net_device *netdev); + +int e1000e_up(struct e1000_adapter *adapter); +void e1000e_down(struct e1000_adapter *adapter); +void e1000e_reinit_locked(struct e1000_adapter *adapter); +void e1000e_reset(struct e1000_adapter *adapter); +void e1000e_power_up_phy(struct e1000_adapter *adapter); +int e1000e_setup_rx_resources(struct e1000_ring *ring); +int e1000e_setup_tx_resources(struct e1000_ring *ring); +void e1000e_free_rx_resources(struct e1000_ring *ring); +void 
e1000e_free_tx_resources(struct e1000_ring *ring); +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_get_hw_control(struct e1000_adapter *adapter); +void e1000e_release_hw_control(struct e1000_adapter *adapter); +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); extern unsigned int copybreak; @@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_es2_info; -extern void e1000e_ptp_init(struct e1000_adapter *adapter); -extern void e1000e_ptp_remove(struct e1000_adapter *adapter); +void e1000e_ptp_init(struct e1000_adapter *adapter); +void e1000e_ptp_remove(struct e1000_adapter *adapter); static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) { @@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) return hw->phy.ops.write_reg_locked(hw, offset, data); } -extern void e1000e_reload_nvm_generic(struct e1000_hw *hw); +void e1000e_reload_nvm_generic(struct e1000_hw *hw); static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 4ef786775ac..8d3945ab733 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
**/ -static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) { struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config *config = &adapter->hwtstamp_config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 rxmtrl = 0; @@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) return -ERANGE; } + adapter->hwtstamp_config = *config; + /* enable/disable Tx h/w time stamping */ regval = er32(TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; @@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter); + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - adapter->hwtstamp_config = config; - - ret_val = e1000e_config_hwtstamp(adapter); + ret_val = e1000e_config_hwtstamp(adapter, &config); if (ret_val) return ret_val; - config = adapter->hwtstamp_config; - switch (config.rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -6553,21 +6551,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b5252eb8a6c..1ca9834cdfd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -46,7 +46,6 @@ #include <linux/sctp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> -#include <linux/version.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> @@ -347,9 +346,9 @@ struct i40e_vsi { u32 rx_buf_failed; u32 rx_page_failed; - /* These are arrays of rings, allocated at run-time */ - struct i40e_ring *rx_rings; - struct i40e_ring *tx_rings; + /* These are containers of ring pointers, allocated at run-time */ + struct i40e_ring **rx_rings; + struct i40e_ring **tx_rings; u16 work_limit; /* high bit set means dynamic, use accessor routines to read/write. 
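The e1000/e1000e probe() hunks above all make the same simplification: the paired dma_set_mask() + dma_set_coherent_mask() calls collapse into a single dma_set_mask_and_coherent(), trying a 64-bit mask first and falling back to 32-bit. A minimal sketch of the resulting shape (the function and variable names here are illustrative, not taken from any one driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int sketch_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	int err;

	*using_dac = false;
	/* one call now sets both the streaming and coherent masks */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		*using_dac = true;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			return err;
		}
	}
	return 0;
}

As the comment removed from e1000_probe() noted, DMA-API-HOWTO guarantees the coherent mask can be set whenever the streaming mask could, so folding the two calls together loses no error information.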
@@ -366,7 +365,7 @@ struct i40e_vsi { u8 dtype; /* List of q_vectors allocated to this VSI */ - struct i40e_q_vector *q_vectors; + struct i40e_q_vector **q_vectors; int num_q_vectors; int base_vector; @@ -422,8 +421,9 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ - char name[IFNAMSIZ + 9]; cpumask_t affinity_mask; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; } ____cacheline_internodealigned_in_smp; /* lan device */ @@ -544,6 +544,7 @@ static inline void i40e_dbg_init(void) {} static inline void i40e_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS*/ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8dbd91f64b7..ef4cb1cf31f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; - char dump_request_buf[16]; bool seid_found = false; - int bytes_not_copied; long seid = -1; int buflen = 0; int i, ret; @@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp, /* don't allow partial writes */ if (*ppos != 0) return 0; - if (count >= sizeof(dump_request_buf)) - return -ENOSPC; - - bytes_not_copied = copy_from_user(dump_request_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - if (bytes_not_copied > 0) - count -= bytes_not_copied; - dump_request_buf[count] = '\0'; /* decode the SEID given to be dumped */ - ret = kstrtol(dump_request_buf, 0, &seid); - if (ret < 0) { - dev_info(&pf->pdev->dev, "bad seid value '%s'\n", - dump_request_buf); + ret = kstrtol_from_user(buffer, count, 0, &seid); + + if (ret) { + dev_info(&pf->pdev->dev, "bad seid value\n"); } else if (seid == 0) { seid_found = true; @@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp, memcpy(p, vsi, len); p += len; - len = (sizeof(struct i40e_q_vector) - * vsi->num_q_vectors); - memcpy(p, vsi->q_vectors, len); - p += len; - - len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs); - memcpy(p, vsi->tx_rings, len); - p += len; - memcpy(p, vsi->rx_rings, len); - p += len; + if (vsi->num_q_vectors) { + len = (sizeof(struct i40e_q_vector) + * vsi->num_q_vectors); + memcpy(p, vsi->q_vectors, len); + p += len; + } - for (i = 0; i < vsi->num_queue_pairs; i++) { - len = sizeof(struct i40e_tx_buffer); - memcpy(p, vsi->tx_rings[i].tx_bi, len); + if (vsi->num_queue_pairs) { + len = (sizeof(struct i40e_ring) * + vsi->num_queue_pairs); + memcpy(p, vsi->tx_rings, len); + p += len; + memcpy(p, vsi->rx_rings, len); p += len; } - for (i = 0; i < vsi->num_queue_pairs; i++) { + + if (vsi->tx_rings[0]) { + len = sizeof(struct i40e_tx_buffer); + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->tx_rings[i]->tx_bi, len); + p += len; + } len = sizeof(struct i40e_rx_buffer); - memcpy(p, vsi->rx_rings[i].rx_bi, len); - p += len; + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->rx_rings[i]->rx_bi, len); + p += len; + } } /* macvlan filter list */ @@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int 
seid) " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", vsi->tx_restart, vsi->tx_busy, vsi->rx_buf_failed, vsi->rx_page_failed); - if (vsi->rx_rings) { - for (i = 0; i < vsi->num_queue_pairs; i++) { - dev_info(&pf->pdev->dev, - " rx_rings[%i]: desc = %p\n", - i, vsi->rx_rings[i].desc); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", - i, vsi->rx_rings[i].dev, - vsi->rx_rings[i].netdev, - vsi->rx_rings[i].rx_bi); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", - i, vsi->rx_rings[i].state, - vsi->rx_rings[i].queue_index, - vsi->rx_rings[i].reg_idx); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", - i, vsi->rx_rings[i].rx_hdr_len, - vsi->rx_rings[i].rx_buf_len, - vsi->rx_rings[i].dtype); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, vsi->rx_rings[i].hsplit, - vsi->rx_rings[i].next_to_use, - vsi->rx_rings[i].next_to_clean, - vsi->rx_rings[i].ring_active); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", - i, vsi->rx_rings[i].rx_stats.packets, - vsi->rx_rings[i].rx_stats.bytes, - vsi->rx_rings[i].rx_stats.non_eop_descs); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", - i, - vsi->rx_rings[i].rx_stats.alloc_rx_page_failed, - vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: size = %i, dma = 0x%08lx\n", - i, vsi->rx_rings[i].size, - (long unsigned int)vsi->rx_rings[i].dma); - dev_info(&pf->pdev->dev, - " rx_rings[%i]: vsi = %p, q_vector = %p\n", - i, vsi->rx_rings[i].vsi, - vsi->rx_rings[i].q_vector); - } + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + if (!rx_ring) + continue; + + dev_info(&pf->pdev->dev, + " rx_rings[%i]: desc = %p\n", + i, rx_ring->desc); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", + i, rx_ring->dev, + rx_ring->netdev, + rx_ring->rx_bi); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, rx_ring->state, + rx_ring->queue_index, + rx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", + i, rx_ring->rx_hdr_len, + rx_ring->rx_buf_len, + rx_ring->dtype); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, rx_ring->hsplit, + rx_ring->next_to_use, + rx_ring->next_to_clean, + rx_ring->ring_active); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", + i, rx_ring->stats.packets, + rx_ring->stats.bytes, + rx_ring->rx_stats.non_eop_descs); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", + i, + rx_ring->rx_stats.alloc_rx_page_failed, + rx_ring->rx_stats.alloc_rx_buff_failed); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, rx_ring->size, + (long unsigned int)rx_ring->dma); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: vsi = %p, q_vector = %p\n", + i, rx_ring->vsi, + rx_ring->q_vector); } - if (vsi->tx_rings) { - for (i = 0; i < vsi->num_queue_pairs; i++) { - dev_info(&pf->pdev->dev, - " tx_rings[%i]: desc = %p\n", - i, vsi->tx_rings[i].desc); - 
dev_info(&pf->pdev->dev, - " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", - i, vsi->tx_rings[i].dev, - vsi->tx_rings[i].netdev, - vsi->tx_rings[i].tx_bi); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", - i, vsi->tx_rings[i].state, - vsi->tx_rings[i].queue_index, - vsi->tx_rings[i].reg_idx); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: dtype = %d\n", - i, vsi->tx_rings[i].dtype); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, vsi->tx_rings[i].hsplit, - vsi->tx_rings[i].next_to_use, - vsi->tx_rings[i].next_to_clean, - vsi->tx_rings[i].ring_active); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", - i, vsi->tx_rings[i].tx_stats.packets, - vsi->tx_rings[i].tx_stats.bytes, - vsi->tx_rings[i].tx_stats.restart_queue); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n", - i, - vsi->tx_rings[i].tx_stats.tx_busy, - vsi->tx_rings[i].tx_stats.completed, - vsi->tx_rings[i].tx_stats.tx_done_old); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: size = %i, dma = 0x%08lx\n", - i, vsi->tx_rings[i].size, - (long unsigned int)vsi->tx_rings[i].dma); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: vsi = %p, q_vector = %p\n", - i, vsi->tx_rings[i].vsi, - vsi->tx_rings[i].q_vector); - dev_info(&pf->pdev->dev, - " tx_rings[%i]: DCB tc = %d\n", - i, vsi->tx_rings[i].dcb_tc); - } + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + dev_info(&pf->pdev->dev, + " tx_rings[%i]: desc = %p\n", + i, tx_ring->desc); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", + i, tx_ring->dev, + tx_ring->netdev, + tx_ring->tx_bi); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, tx_ring->state, + tx_ring->queue_index, + tx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dtype = %d\n", + i, tx_ring->dtype); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->hsplit, + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->ring_active); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", + i, tx_ring->stats.packets, + tx_ring->stats.bytes, + tx_ring->tx_stats.restart_queue); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + i, + tx_ring->tx_stats.tx_busy, + tx_ring->tx_stats.tx_done_old); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, tx_ring->size, + (long unsigned int)tx_ring->dma); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: vsi = %p, q_vector = %p\n", + i, tx_ring->vsi, + tx_ring->q_vector); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: DCB tc = %d\n", + i, tx_ring->dcb_tc); } + rcu_read_unlock(); dev_info(&pf->pdev->dev, " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", vsi->work_limit, vsi->rx_itr_setting, @@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); - if (vsi->q_vectors) { - for (i = 0; i < vsi->num_q_vectors; i++) { - dev_info(&pf->pdev->dev, - " q_vectors[%i]: base index = %ld\n", - 
i, ((long int)*vsi->q_vectors[i].rx.ring- - (long int)*vsi->q_vectors[0].rx.ring)/ - sizeof(struct i40e_ring)); - } - } dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); @@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, return; } if (is_rx_ring) - ring = vsi->rx_rings[ring_id]; + ring = *vsi->rx_rings[ring_id]; else - ring = vsi->tx_rings[ring_id]; + ring = *vsi->tx_rings[ring_id]; if (cnt == 2) { dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); @@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; int bytes_not_copied; struct i40e_vsi *vsi; u8 *print_buf_start; u8 *print_buf; - char *cmd_buf; int vsi_seid; int veb_seid; int cnt; @@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp, count -= bytes_not_copied; cmd_buf[count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); if (!print_buf_start) goto command_write_done; @@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, i40e_veb_release(pf->veb[i]); } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { - u8 ma[6]; - int vlan = 0; struct i40e_mac_filter *f; + int vlan = 0; + u8 ma[6]; int ret; cnt = sscanf(&cmd_buf[11], @@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ma, vlan, vsi_seid, f, ret); } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { - u8 ma[6]; int vlan = 0; + u8 ma[6]; int ret; cnt = sscanf(&cmd_buf[11], @@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ma, vlan, vsi_seid, ret); } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { - int v; - u16 vid; i40e_status ret; + u16 vid; + int v; cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); if (cnt != 2) { @@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { struct i40e_fdir_data fd_data; - int ret; u16 packet_len, i, j = 0; char *asc_packet; bool add = false; + int ret; asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, GFP_KERNEL); @@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) { + u16 llen, rlen; int ret, i; u8 *buff; - u16 llen, rlen; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { + u16 llen, rlen; int ret, i; u8 *buff; - u16 llen, rlen; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - /* Read at least 512 words */ - if (buffer_len == 0) - buffer_len = 512; + /* set the max length */ + buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2); bytes = 2 * buffer_len; + + /* read at least 1k bytes, no more than 4kB */ + bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE); buff = kzalloc(bytes, GFP_KERNEL); if (!buff) goto command_write_done; @@ -1903,6 +1902,7 @@ static 
ssize_t i40e_dbg_netdev_ops_write(struct file *filp, struct i40e_pf *pf = filp->private_data; int bytes_not_copied; struct i40e_vsi *vsi; + char *buf_tmp; int vsi_seid; int i, cnt; @@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, count -= bytes_not_copied; i40e_dbg_netdev_ops_buf[count] = '\0'; + buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); + if (buf_tmp) { + *buf_tmp = '\0'; + count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; + } + if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); if (cnt != 1) { @@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, goto netdev_ops_write_done; } for (i = 0; i < vsi->num_q_vectors; i++) - napi_schedule(&vsi->q_vectors[i].napi); + napi_schedule(&vsi->q_vectors[i]->napi); dev_info(&pf->pdev->dev, "napi called\n"); } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", @@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { - struct dentry *pfile __attribute__((unused)); + struct dentry *pfile; const char *name = pci_name(pf->pdev); + const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); - if (pf->i40e_dbg_pf) { - pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, - pf, &i40e_dbg_command_fops); - pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_dump_fops); - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, - pf, &i40e_dbg_netdev_ops_fops); - } else { - dev_info(&pf->pdev->dev, - "debugfs entry for %s failed\n", name); - } + if (!pf->i40e_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_dump_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->i40e_dbg_pf); + return; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9a76b8cec76..1b86138fa9e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev, ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; - ring->rx_pending = vsi->rx_rings[0].count; - ring->tx_pending = vsi->tx_rings[0].count; + ring->rx_pending = vsi->rx_rings[0]->count; + ring->tx_pending = vsi->tx_rings[0]->count; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } @@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev, new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); /* if nothing to do return success */ - if ((new_tx_count == vsi->tx_rings[0].count) && - (new_rx_count == vsi->rx_rings[0].count)) + if ((new_tx_count == vsi->tx_rings[0]->count) && + (new_rx_count == vsi->rx_rings[0]->count)) return 0; while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) @@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev, if (!netif_running(vsi->netdev)) { /* simple case - set for the next time 
the netdev is started */ for (i = 0; i < vsi->num_queue_pairs; i++) { - vsi->tx_rings[i].count = new_tx_count; - vsi->rx_rings[i].count = new_rx_count; + vsi->tx_rings[i]->count = new_tx_count; + vsi->rx_rings[i]->count = new_rx_count; } goto done; } @@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev, */ /* alloc updated Tx resources */ - if (new_tx_count != vsi->tx_rings[0].count) { + if (new_tx_count != vsi->tx_rings[0]->count) { netdev_info(netdev, "Changing Tx descriptor count from %d to %d.\n", - vsi->tx_rings[0].count, new_tx_count); + vsi->tx_rings[0]->count, new_tx_count); tx_rings = kcalloc(vsi->alloc_queue_pairs, sizeof(struct i40e_ring), GFP_KERNEL); if (!tx_rings) { @@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev, for (i = 0; i < vsi->num_queue_pairs; i++) { /* clone ring and setup updated count */ - tx_rings[i] = vsi->tx_rings[i]; + tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_count; err = i40e_setup_tx_descriptors(&tx_rings[i]); if (err) { @@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev, } /* alloc updated Rx resources */ - if (new_rx_count != vsi->rx_rings[0].count) { + if (new_rx_count != vsi->rx_rings[0]->count) { netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", - vsi->rx_rings[0].count, new_rx_count); + vsi->rx_rings[0]->count, new_rx_count); rx_rings = kcalloc(vsi->alloc_queue_pairs, sizeof(struct i40e_ring), GFP_KERNEL); if (!rx_rings) { @@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev, for (i = 0; i < vsi->num_queue_pairs; i++) { /* clone ring and setup updated count */ - rx_rings[i] = vsi->rx_rings[i]; + rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) { @@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev, if (tx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) { - i40e_free_tx_resources(&vsi->tx_rings[i]); - vsi->tx_rings[i] = tx_rings[i]; + i40e_free_tx_resources(vsi->tx_rings[i]); + *vsi->tx_rings[i] = tx_rings[i]; } kfree(tx_rings); tx_rings = NULL; @@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev, if (rx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) { - i40e_free_rx_resources(&vsi->rx_rings[i]); - vsi->rx_rings[i] = rx_rings[i]; + i40e_free_rx_resources(vsi->rx_rings[i]); + *vsi->rx_rings[i] = rx_rings[i]; } kfree(rx_rings); rx_rings = NULL; @@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, char *p; int j; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); + unsigned int start; i40e_update_stats(vsi); @@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - for (j = 0; j < vsi->num_queue_pairs; j++) { - data[i++] = vsi->tx_rings[j].tx_stats.packets; - data[i++] = vsi->tx_rings[j].tx_stats.bytes; - } - for (j = 0; j < vsi->num_queue_pairs; j++) { - data[i++] = vsi->rx_rings[j].rx_stats.packets; - data[i++] = vsi->rx_rings[j].rx_stats.bytes; + rcu_read_lock(); + for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) { + struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + struct i40e_ring *rx_ring; + + if (!tx_ring) + continue; + + /* process Tx ring statistics */ + do { + start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + data[i] = tx_ring->stats.packets; + data[i + 1] = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); + + /* Rx ring is the 2nd half of the queue pair */ + rx_ring = &tx_ring[1]; + do { + start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + data[i + 2] = rx_ring->stats.packets; + data[i + 3] = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); } + rcu_read_unlock(); if (vsi == pf->vsi[pf->lan_vsi]) { for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { p = (char *)pf + i40e_gstrings_stats[j].stat_offset; @@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); p += ETH_GSTRING_LEN; - } - for (i = 0; i < vsi->num_queue_pairs; i++) { snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); @@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev, } vector = vsi->base_vector; - q_vector = vsi->q_vectors; - for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) { + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 221aa479501..12b0932204b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -36,7 +36,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 9 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -347,14 +347,56 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) **/ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, - struct rtnl_link_stats64 *storage) + struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + int i; + + if (!vsi->tx_rings) + return stats; + + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *tx_ring, *rx_ring; + u64 bytes, packets; + unsigned int start; + + tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + + do { + start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); - *storage = *i40e_get_vsi_stats_struct(vsi); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + rx_ring = &tx_ring[1]; + + do { + start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); - return storage; + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + rcu_read_unlock(); + + /* following stats updated by i40e_watchdog_subtask() */ + stats->multicast = vsi_stats->multicast; + stats->tx_errors = vsi_stats->tx_errors; + stats->tx_dropped = vsi_stats->tx_dropped; + stats->rx_errors = vsi_stats->rx_errors; + stats->rx_crc_errors = vsi_stats->rx_crc_errors; + stats->rx_length_errors = vsi_stats->rx_length_errors; + + return stats; } /** @@ -376,10 +418,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings) for (i = 0; i < vsi->num_queue_pairs; i++) { - memset(&vsi->rx_rings[i].rx_stats, 0 , - sizeof(vsi->rx_rings[i].rx_stats)); - memset(&vsi->tx_rings[i].tx_stats, 0, - sizeof(vsi->tx_rings[i].tx_stats)); + memset(&vsi->rx_rings[i]->stats, 0 , - sizeof(vsi->rx_rings[i]->stats)); + memset(&vsi->rx_rings[i]->rx_stats, 0 , + sizeof(vsi->rx_rings[i]->rx_stats)); + memset(&vsi->tx_rings[i]->stats, 0 , + sizeof(vsi->tx_rings[i]->stats)); + memset(&vsi->tx_rings[i]->tx_stats, 0, + sizeof(vsi->tx_rings[i]->tx_stats)); } vsi->stat_offsets_loaded = false; } @@ -598,7 +644,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf) continue; for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = &vsi->tx_rings[i]; + struct i40e_ring *ring = vsi->tx_rings[i]; clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); } } @@ -652,7 +698,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) continue; for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *ring = &vsi->tx_rings[i]; + struct i40e_ring *ring = vsi->tx_rings[i]; tc = ring->dcb_tc; if (xoff[tc]) @@ -704,21 +750,38 @@ void i40e_update_stats(struct i40e_vsi *vsi) tx_restart = tx_busy = 0; rx_page = 0; rx_buf = 0; + rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { struct i40e_ring *p; + u64 bytes, packets; + unsigned int start; - p = &vsi->rx_rings[q]; - rx_b += p->rx_stats.bytes; - rx_p += p->rx_stats.packets; - rx_buf += p->rx_stats.alloc_rx_buff_failed; - rx_page += p->rx_stats.alloc_rx_page_failed; + /* locate Tx ring */ + p = ACCESS_ONCE(vsi->tx_rings[q]); - p = &vsi->tx_rings[q]; - tx_b += p->tx_stats.bytes; - tx_p += p->tx_stats.packets; + do { + start =
u64_stats_fetch_begin_bh(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + tx_b += bytes; + tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; + + /* Rx queue is part of the same block as Tx queue */ + p = &p[1]; + do { + start = u64_stats_fetch_begin_bh(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + rx_b += bytes; + rx_p += packets; + rx_buf += p->rx_stats.alloc_rx_buff_failed; + rx_page += p->rx_stats.alloc_rx_page_failed; } + rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->rx_page_failed = rx_page; @@ -1988,7 +2051,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) - err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]); + err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); return err; } @@ -2004,8 +2067,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) int i; for (i = 0; i < vsi->num_queue_pairs; i++) - if (vsi->tx_rings[i].desc) - i40e_free_tx_resources(&vsi->tx_rings[i]); + if (vsi->tx_rings[i]->desc) + i40e_free_tx_resources(vsi->tx_rings[i]); } /** @@ -2023,7 +2086,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) - err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]); + err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); return err; } @@ -2038,8 +2101,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) int i; for (i = 0; i < vsi->num_queue_pairs; i++) - if (vsi->rx_rings[i].desc) - i40e_free_rx_resources(&vsi->rx_rings[i]); + if (vsi->rx_rings[i]->desc) + i40e_free_rx_resources(vsi->rx_rings[i]); } /** @@ -2114,8 +2177,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) /* Now associate this queue with this PCI function */ qtx_ctl = I40E_QTX_CTL_PF_QUEUE; - qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) - & I40E_QTX_CTL_PF_INDX_MASK); + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); @@ -2223,8 +2286,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) int err = 0; u16 i; - for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++) - err = i40e_configure_tx_ring(&vsi->tx_rings[i]); + for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) + err = i40e_configure_tx_ring(vsi->tx_rings[i]); return err; } @@ -2274,7 +2337,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) - err = i40e_configure_rx_ring(&vsi->rx_rings[i]); + err = i40e_configure_rx_ring(vsi->rx_rings[i]); return err; } @@ -2298,8 +2361,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) qoffset = vsi->tc_config.tc_info[n].qoffset; qcount = vsi->tc_config.tc_info[n].qcount; for (i = qoffset; i < (qoffset + qcount); i++) { - struct i40e_ring *rx_ring = &vsi->rx_rings[i]; - struct i40e_ring *tx_ring = &vsi->tx_rings[i]; + struct i40e_ring *rx_ring = vsi->rx_rings[i]; + struct i40e_ring *tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = n; tx_ring->dcb_tc = n; } @@ -2354,8 +2417,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) */ qp = vsi->base_queue; vector = vsi->base_vector; - q_vector = vsi->q_vectors; - for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) { + for (i = 0; i < 
vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -2435,7 +2498,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw) **/ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) { - struct i40e_q_vector *q_vector = vsi->q_vectors; + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 val; @@ -2472,7 +2535,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure **/ -static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; @@ -2500,7 +2563,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - i40e_flush(hw); + /* skip the flush */ } /** @@ -2512,7 +2575,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) { struct i40e_q_vector *q_vector = data; - if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) + if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; napi_schedule(&q_vector->napi); @@ -2529,7 +2592,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) { struct i40e_q_vector *q_vector = data; - if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) + if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; pr_info("fdir ring cleaning needed\n"); @@ -2554,16 +2617,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int vector, err; for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]); + struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; - if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) { + if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); tx_int_idx++; - } else if (q_vector->rx.ring[0]) { + } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "rx", rx_int_idx++); - } else if (q_vector->tx.ring[0]) { + } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "tx", tx_int_idx++); } else { @@ -2611,8 +2674,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) int i; for (i = 0; i < vsi->num_queue_pairs; i++) { - wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0); - wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0); + wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); + wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); } if (pf->flags & I40E_FLAG_MSIX_ENABLED) { @@ -2649,6 +2712,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) i40e_irq_dynamic_enable_icr0(pf); } + i40e_flush(&pf->hw); return 0; } @@ -2681,14 +2745,14 @@ static irqreturn_t i40e_intr(int irq, void *data) icr0 = rd32(hw, I40E_PFINT_ICR0); - /* if sharing a legacy IRQ, we might get called w/o an intr pending */ - if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) - return IRQ_NONE; - val = rd32(hw, I40E_PFINT_DYN_CTL0); val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; wr32(hw, I40E_PFINT_DYN_CTL0, val); + /* if sharing a legacy IRQ, we might get 
called w/o an intr pending */ + if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) + return IRQ_NONE; + ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ @@ -2702,10 +2766,9 @@ static irqreturn_t i40e_intr(int irq, void *data) qval = rd32(hw, I40E_QINT_TQCTL(0)); qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(0), qval); - i40e_flush(hw); if (!test_bit(__I40E_DOWN, &pf->state)) - napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi); + napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -2764,7 +2827,6 @@ static irqreturn_t i40e_intr(int irq, void *data) /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); - i40e_flush(hw); if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf); @@ -2774,40 +2836,26 @@ static irqreturn_t i40e_intr(int irq, void *data) } /** - * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector + * i40e_map_vector_to_qp - Assigns the queue pair to the vector * @vsi: the VSI being configured * @v_idx: vector index - * @r_idx: rx queue index + * @qp_idx: queue pair index **/ -static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx) +static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { - struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); - struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]); - - rx_ring->q_vector = q_vector; - q_vector->rx.ring[q_vector->rx.count] = rx_ring; - q_vector->rx.count++; - q_vector->rx.latency_range = I40E_LOW_LATENCY; - q_vector->vsi = vsi; -} - -/** - * i40e_map_vector_to_txq - Assigns the Tx queue to the vector - * @vsi: the VSI being configured - * @v_idx: vector index - * @t_idx: tx queue index - **/ -static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx) -{ - struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); - struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]); + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; + struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; tx_ring->q_vector = q_vector; - q_vector->tx.ring[q_vector->tx.count] = tx_ring; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; q_vector->tx.count++; - q_vector->tx.latency_range = I40E_LOW_LATENCY; - q_vector->num_ringpairs++; - q_vector->vsi = vsi; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; } /** @@ -2823,7 +2871,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) { int qp_remaining = vsi->num_queue_pairs; int q_vectors = vsi->num_q_vectors; - int qp_per_vector; + int num_ringpairs; int v_start = 0; int qp_idx = 0; @@ -2831,11 +2879,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) * group them so there are multiple queues per vector. 
*/ for (; v_start < q_vectors && qp_remaining; v_start++) { - qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); - for (; qp_per_vector; - qp_per_vector--, qp_idx++, qp_remaining--) { - map_vector_to_rxq(vsi, v_start, qp_idx); - map_vector_to_txq(vsi, v_start, qp_idx); + struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + + while (num_ringpairs--) { + map_vector_to_qp(vsi, v_start, qp_idx); + qp_idx++; + qp_remaining--; } } } @@ -2887,7 +2945,7 @@ static void i40e_netpoll(struct net_device *netdev) pf->flags |= I40E_FLAG_IN_NETPOLL; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) - i40e_msix_clean_rings(0, &vsi->q_vectors[i]); + i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } @@ -3073,14 +3131,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) u16 vector = i + base; /* free only the irqs that were actually requested */ - if (vsi->q_vectors[i].num_ringpairs == 0) + if (vsi->q_vectors[i]->num_ringpairs == 0) continue; /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); free_irq(pf->msix_entries[vector].vector, - &vsi->q_vectors[i]); + vsi->q_vectors[i]); /* Tear down the interrupt queue link list * @@ -3164,6 +3222,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) } /** + * i40e_free_q_vector - Free memory allocated for specific interrupt vector + * @vsi: the VSI being configured + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *ring; + + if (!q_vector) + return; + + /* disassociate q_vector from rings */ + i40e_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + + i40e_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI w/ an associated netdev is set up w/ NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + vsi->q_vectors[v_idx] = NULL; + + kfree_rcu(q_vector, rcu); +} + +/** * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors * @vsi: the VSI being un-configured * @@ -3174,24 +3265,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) { int v_idx; - for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) { - struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx]; - int r_idx; - - if (!q_vector) - continue; - - /* disassociate q_vector from rings */ - for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++) - q_vector->tx.ring[r_idx]->q_vector = NULL; - for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++) - q_vector->rx.ring[r_idx]->q_vector = NULL; - - /* only VSI w/ an associated netdev is set up w/ NAPI */ - if (vsi->netdev) - netif_napi_del(&q_vector->napi); - } - kfree(vsi->q_vectors); + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + i40e_free_q_vector(vsi, v_idx); } /** @@ -3241,7 +3316,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_enable(&vsi->q_vectors[q_idx].napi); + napi_enable(&vsi->q_vectors[q_idx]->napi); } /** @@ -3256,7 +3331,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_disable(&vsi->q_vectors[q_idx].napi); + napi_disable(&vsi->q_vectors[q_idx]->napi); } /** @@ -3703,8 +3778,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi) if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && (vsi->netdev)) { + netdev_info(vsi->netdev, "NIC Link is Up\n"); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); + } else if (vsi->netdev) { + netdev_info(vsi->netdev, "NIC Link is Down\n"); } i40e_service_event_schedule(pf); @@ -3772,8 +3850,8 @@ void i40e_down(struct i40e_vsi *vsi) i40e_napi_disable_all(vsi); for (i = 0; i < vsi->num_queue_pairs; i++) { - i40e_clean_tx_ring(&vsi->tx_rings[i]); - i40e_clean_rx_ring(&vsi->rx_rings[i]); + i40e_clean_tx_ring(vsi->tx_rings[i]); + i40e_clean_rx_ring(vsi->rx_rings[i]); } } @@ -4153,8 +4231,9 @@ static void i40e_link_event(struct i40e_pf *pf) if (new_link == old_link) return; - netdev_info(pf->vsi[pf->lan_vsi]->netdev, - "NIC Link is %s\n", (new_link ? "Up" : "Down")); + if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) + netdev_info(pf->vsi[pf->lan_vsi]->netdev, + "NIC Link is %s\n", (new_link ? "Up" : "Down")); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. 
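i40e_free_q_vector() above is the retire half of an RCU publish/retire scheme: readers such as the stats and debugfs paths in this series load a ring or vector pointer once under rcu_read_lock(), while the free side clears the published pointer and defers the actual kfree() past a grace period via the rcu_head now embedded in the structure. A stripped-down sketch of the pattern, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct sketch_ring {
	u64 packets;
	struct rcu_head rcu;	/* lets kfree_rcu() defer the free */
};

/* reader: snapshot the pointer once; a racing free cannot reclaim
 * the memory until we leave the read-side critical section
 */
static u64 sketch_read_packets(struct sketch_ring **slot)
{
	struct sketch_ring *ring;
	u64 packets = 0;

	rcu_read_lock();
	ring = ACCESS_ONCE(*slot);	/* as in the driver; rcu_dereference() is the stricter form */
	if (ring)
		packets = ring->packets;
	rcu_read_unlock();
	return packets;
}

/* writer: unpublish first, then reclaim after a grace period */
static void sketch_free_ring(struct sketch_ring **slot)
{
	struct sketch_ring *ring = *slot;

	*slot = NULL;
	if (ring)
		kfree_rcu(ring, rcu);
}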
@@ -4199,9 +4278,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) continue; for (i = 0; i < vsi->num_queue_pairs; i++) { - set_check_for_tx_hang(&vsi->tx_rings[i]); + set_check_for_tx_hang(vsi->tx_rings[i]); if (test_bit(__I40E_HANG_CHECK_ARMED, - &vsi->tx_rings[i].state)) + &vsi->tx_rings[i]->state)) armed++; } @@ -4537,7 +4616,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf) bool new_vsi = false; int err, i; - if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED))) + if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED | + I40E_FLAG_FDIR_ATR_ENABLED))) return; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; @@ -4937,6 +5017,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) { int ret = -ENODEV; struct i40e_vsi *vsi; + int sz_vectors; + int sz_rings; int vsi_idx; int i; @@ -4962,14 +5044,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi_idx = i; /* Found one! */ } else { ret = -ENODEV; - goto err_alloc_vsi; /* out of VSI slots! */ + goto unlock_pf; /* out of VSI slots! */ } pf->next_vsi = ++i; vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); if (!vsi) { ret = -ENOMEM; - goto err_alloc_vsi; + goto unlock_pf; } vsi->type = type; vsi->back = pf; @@ -4982,14 +5064,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->work_limit = I40E_DEFAULT_IRQ_WORK; INIT_LIST_HEAD(&vsi->mac_filter_list); - i40e_set_num_rings_in_vsi(vsi); + ret = i40e_set_num_rings_in_vsi(vsi); + if (ret) + goto err_rings; + + /* allocate memory for ring pointers */ + sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; + vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL); + if (!vsi->tx_rings) { + ret = -ENOMEM; + goto err_rings; + } + vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; + + /* allocate memory for q_vector pointers */ + sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; + vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL); + if (!vsi->q_vectors) { + ret = -ENOMEM; + goto err_vectors; + } /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); pf->vsi[vsi_idx] = vsi; ret = vsi_idx; -err_alloc_vsi: + goto unlock_pf; + +err_vectors: + kfree(vsi->tx_rings); +err_rings: + pf->next_vsi = i - 1; + kfree(vsi); +unlock_pf: mutex_unlock(&pf->switch_mutex); return ret; } @@ -5030,6 +5138,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); + /* free the ring and vector containers */ + kfree(vsi->q_vectors); + kfree(vsi->tx_rings); + pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) pf->next_vsi = vsi->idx; @@ -5043,34 +5155,40 @@ free_vsi: } /** + * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI + * @vsi: the VSI being cleaned + **/ +static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings[0]) + for (i = 0; i < vsi->alloc_queue_pairs; i++) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + vsi->rx_rings[i] = NULL; + } + + return 0; +} + +/** * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being configured **/ static int i40e_alloc_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int ret = 0; int i; - vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs, - sizeof(struct i40e_ring), GFP_KERNEL); - if (!vsi->rx_rings) { - ret = -ENOMEM; - goto err_alloc_rings; - } - - vsi->tx_rings = 
kcalloc(vsi->alloc_queue_pairs, - sizeof(struct i40e_ring), GFP_KERNEL); - if (!vsi->tx_rings) { - ret = -ENOMEM; - kfree(vsi->rx_rings); - goto err_alloc_rings; - } - /* Set basic values in the rings to be used later during open() */ for (i = 0; i < vsi->alloc_queue_pairs; i++) { - struct i40e_ring *rx_ring = &vsi->rx_rings[i]; - struct i40e_ring *tx_ring = &vsi->tx_rings[i]; + struct i40e_ring *tx_ring; + struct i40e_ring *rx_ring; + + tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + if (!tx_ring) + goto err_out; tx_ring->queue_index = i; tx_ring->reg_idx = vsi->base_queue + i; @@ -5081,7 +5199,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->count = vsi->num_desc; tx_ring->size = 0; tx_ring->dcb_tc = 0; + vsi->tx_rings[i] = tx_ring; + rx_ring = &tx_ring[1]; rx_ring->queue_index = i; rx_ring->reg_idx = vsi->base_queue + i; rx_ring->ring_active = false; @@ -5095,24 +5215,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) set_ring_16byte_desc_enabled(rx_ring); else clear_ring_16byte_desc_enabled(rx_ring); - } - -err_alloc_rings: - return ret; -} - -/** - * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI - * @vsi: the VSI being cleaned - **/ -static int i40e_vsi_clear_rings(struct i40e_vsi *vsi) -{ - if (vsi) { - kfree(vsi->rx_rings); - kfree(vsi->tx_rings); + vsi->rx_rings[i] = rx_ring; } return 0; + +err_out: + i40e_vsi_clear_rings(vsi); + return -ENOMEM; } /** @@ -5249,6 +5359,38 @@ static int i40e_init_msix(struct i40e_pf *pf) } /** + * i40e_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the vsi struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, + i40e_napi_poll, vsi->work_limit); + + q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->tx.latency_range = I40E_LOW_LATENCY; + + /* tie q_vector and vsi together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** * i40e_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * @@ -5259,6 +5401,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int v_idx, num_q_vectors; + int err; /* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -5268,22 +5411,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) else return -EINVAL; - vsi->q_vectors = kcalloc(num_q_vectors, - sizeof(struct i40e_q_vector), - GFP_KERNEL); - if (!vsi->q_vectors) - return -ENOMEM; - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - vsi->q_vectors[v_idx].vsi = vsi; - vsi->q_vectors[v_idx].v_idx = v_idx; - cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask); - if (vsi->netdev) - netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi, - i40e_napi_poll, vsi->work_limit); + err = i40e_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; } return 0; + +err_out: + while (v_idx--) + i40e_free_q_vector(vsi, v_idx); + + return err; } /** @@ -5297,7 +5437,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (pf->flags & 
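
The i40e_alloc_rings() rewrite above replaces two kcalloc()'d arrays of struct i40e_ring with per-queue-pair allocations: one kzalloc() holds a Tx ring and its Rx partner back to back, and the VSI keeps only pointers (the rx_rings pointer array aliasing the second half of the tx_rings allocation, and the goto ladder in i40e_vsi_mem_alloc() unwinding each step on failure). A pared-down sketch of the pairing, using a hypothetical struct ring:

	#include <linux/slab.h>

	struct ring {
		int queue_index;
	};

	/* allocate one Tx/Rx pair in a single allocation */
	static int alloc_ring_pair(struct ring **tx_slot, struct ring **rx_slot,
				   int qidx)
	{
		struct ring *tx = kzalloc(sizeof(*tx) * 2, GFP_KERNEL);

		if (!tx)
			return -ENOMEM;

		tx[0].queue_index = qidx;	/* Tx ring is the first half */
		tx[1].queue_index = qidx;	/* Rx ring rides behind it */

		*tx_slot = &tx[0];
		*rx_slot = &tx[1];
		return 0;
	}

Because both rings live in one allocation, freeing the Tx pointer releases the pair; that is why i40e_vsi_clear_rings() above frees only tx_rings[i] and simply NULLs rx_rings[i].
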
I40E_FLAG_MSIX_ENABLED) { err = i40e_init_msix(pf); if (err) { - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | + I40E_FLAG_RSS_ENABLED | I40E_FLAG_MQ_ENABLED | I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | @@ -5312,14 +5453,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { + dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); err = pci_enable_msi(pf->pdev); if (err) { - dev_info(&pf->pdev->dev, - "MSI init failed (%d), trying legacy.\n", err); + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } } + if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); + /* track first vector for misc interrupts */ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); } @@ -5950,7 +6094,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) int ret = -ENOENT; struct i40e_pf *pf = vsi->back; - if (vsi->q_vectors) { + if (vsi->q_vectors[0]) { dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", vsi->seid); return -EEXIST; @@ -5972,8 +6116,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) goto vector_setup_out; } - vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, - vsi->num_q_vectors, vsi->idx); + if (vsi->num_q_vectors) + vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, + vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, "failed to get q tracking for VSI %d, err=%d\n", @@ -7062,8 +7207,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; pf->vsi = kzalloc(len, GFP_KERNEL); - if (!pf->vsi) + if (!pf->vsi) { + err = -ENOMEM; goto err_switch_setup; + } err = i40e_setup_pf_switch(pf); if (err) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 49d2cfa9b0c..f1f03bc5c72 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); } +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** * i40e_program_fdir_filter - Program a Flow Director filter * @fdir_input: Packet data that will be filter parameters @@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_desc; struct i40e_ring *tx_ring; + unsigned int fpt, dcc; struct i40e_vsi *vsi; struct device *dev; dma_addr_t dma; @@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, if (!vsi) return -ENOENT; - tx_ring = &vsi->tx_rings[0]; + tx_ring = vsi->tx_rings[0]; dev = tx_ring->dev; dma = dma_map_single(dev, fdir_data->raw_packet, - I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); + I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto dma_fail; /* grab the next descriptor */ - fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); - tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; - tx_ring->next_to_use++; - if (tx_ring->next_to_use == tx_ring->count) - tx_ring->next_to_use = 0; + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + tx_buf = &tx_ring->tx_bi[i]; + + tx_ring->next_to_use = (i + 
1 < tx_ring->count) ? i + 1 : 0; - fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index - << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) - & I40E_TXD_FLTR_QW0_QINDEX_MASK); + fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK; - fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off - << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) - & I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK; - fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype - << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) - & I40E_TXD_FLTR_QW0_PCTYPE_MASK); + fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK; /* Use LAN VSI Id if not programmed by user */ if (fdir_data->dest_vsi == 0) - fdir_desc->qindex_flex_ptype_vsi |= - cpu_to_le32((pf->vsi[pf->lan_vsi]->id) - << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT); + fpt |= (pf->vsi[pf->lan_vsi]->id) << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; else - fdir_desc->qindex_flex_ptype_vsi |= - cpu_to_le32((fdir_data->dest_vsi - << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - & I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + fpt |= ((u32)fdir_data->dest_vsi << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK; - fdir_desc->dtype_cmd_cntindex = - cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG); + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt); + + dcc = I40E_TX_DESC_DTYPE_FILTER_PROG; if (add) - fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE - << I40E_TXD_FLTR_QW1_PCMD_SHIFT); + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; else - fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE - << I40E_TXD_FLTR_QW1_PCMD_SHIFT); + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; - fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl - << I40E_TXD_FLTR_QW1_DEST_SHIFT) - & I40E_TXD_FLTR_QW1_DEST_MASK); + dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK; - fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( - (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - & I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK; if (fdir_data->cnt_index != 0) { - fdir_desc->dtype_cmd_cntindex |= - cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); - fdir_desc->dtype_cmd_cntindex |= - cpu_to_le32((fdir_data->cnt_index - << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - & I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; + dcc |= ((u32)fdir_data->cnt_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; } + fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); /* Now program a dummy descriptor */ - tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use); - tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; - tx_ring->next_to_use++; - if (tx_ring->next_to_use == tx_ring->count) - tx_ring->next_to_use = 0; + i = tx_ring->next_to_use; + tx_desc = I40E_TX_DESC(tx_ring, i); + + tx_ring->next_to_use = (i + 1 < tx_ring->count) ? 
i + 1 : 0; tx_desc->buffer_addr = cpu_to_le64(dma); - td_cmd = I40E_TX_DESC_CMD_EOP | - I40E_TX_DESC_CMD_RS | - I40E_TX_DESC_CMD_DUMMY; + td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); - /* Mark the data descriptor to be watched */ - tx_buf->next_to_watch = tx_desc; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, */ wmb(); + /* Mark the data descriptor to be watched */ + tx_buf->next_to_watch = tx_desc; + writel(tx_ring->next_to_use, tx_ring->tail); return 0; @@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) } /** - * i40e_unmap_tx_resource - Release a Tx buffer + * i40e_unmap_and_free_tx_resource - Release a Tx buffer * @ring: the ring that owns the buffer * @tx_buffer: the buffer to free **/ -static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, - struct i40e_tx_buffer *tx_buffer) +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) { - if (tx_buffer->dma) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE) - dma_unmap_page(ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); - else + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); } - tx_buffer->dma = 0; - tx_buffer->time_stamp = 0; + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ } /** @@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, **/ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) { - struct i40e_tx_buffer *tx_buffer; unsigned long bi_size; u16 i; @@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) return; /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer = &tx_ring->tx_bi[i]; - i40e_unmap_tx_resource(tx_ring, tx_buffer); - if (tx_buffer->skb) - dev_kfree_skb_any(tx_buffer->skb); - tx_buffer->skb = NULL; - } + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; memset(tx_ring->tx_bi, 0, bi_size); @@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)); } /** @@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. 
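
Two ordering details in the hunks above and below are easy to miss: next_to_watch is now assigned only after the wmb(), so the cleanup side can treat a non-NULL next_to_watch as proof that every descriptor write is already visible, and the cleaner pairs that with read_barrier_depends() before inspecting the descriptor. In sketch form, with stand-in types:

	#include <linux/types.h>
	#include <asm/barrier.h>

	struct desc {
		u64 cmd;
	};

	struct buf {
		struct desc *next_to_watch;
	};

	/* producer: fill the descriptor completely, then publish it */
	static void publish(struct buf *b, struct desc *d, u64 cmd)
	{
		d->cmd = cmd;
		wmb();			/* descriptor writes before the publish */
		b->next_to_watch = d;	/* non-NULL now means "work pending" */
	}

	/* consumer: observe the publish before dereferencing the descriptor */
	static bool done(struct buf *b)
	{
		struct desc *d = b->next_to_watch;

		if (!d)
			return false;

		read_barrier_depends();	/* no reads of *d before this point */
		return d->cmd != 0;
	}
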
*/ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) && + if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } else { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets; + tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } @@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; - for (; budget; budget--) { - struct i40e_tx_desc *eop_desc; - - eop_desc = tx_buf->next_to_watch; + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) break; - /* count the packet as being completed */ - tx_ring->tx_stats.completed++; + /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; - tx_buf->time_stamp = 0; - - /* set memory barrier before eop_desc is verified */ - rmb(); - do { - i40e_unmap_tx_resource(tx_ring, tx_buf); + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; - /* clear dtype status */ - tx_desc->cmd_type_offset_bsz &= - ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK); + /* free the skb */ + dev_kfree_skb_any(tx_buf->skb); - if (likely(tx_desc == eop_desc)) { - eop_desc = NULL; + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); - dev_kfree_skb_any(tx_buf->skb); - tx_buf->skb = NULL; + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); - total_bytes += tx_buf->bytecount; - total_packets += tx_buf->gso_segs; - } + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { tx_buf++; tx_desc++; i++; - if (unlikely(i == tx_ring->count)) { - i = 0; + if (unlikely(!i)) { + i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } - } while (eop_desc); - } + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; tx_ring->next_to_clean = i; - tx_ring->tx_stats.bytes += total_bytes; - tx_ring->tx_stats.packets += total_packets; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" @@ -414,6 +434,10 @@ static 
bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) return true; } + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + total_packets, total_bytes); + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { @@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) i40e_set_new_dynamic_itr(&q_vector->tx); if (old_itr != q_vector->tx.itr) wr32(hw, reg_addr, q_vector->tx.itr); - - i40e_flush(hw); } /** @@ -1042,8 +1064,10 @@ next_desc: } rx_ring->next_to_clean = i; - rx_ring->rx_stats.packets += total_rx_packets; - rx_ring->rx_stats.bytes += total_rx_bytes; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; @@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) struct i40e_q_vector *q_vector = container_of(napi, struct i40e_q_vector, napi); struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; bool clean_complete = true; int budget_per_ring; - int i; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); return 0; } + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + i40e_for_each_ring(ring, q_vector->tx) + clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. - * Since the actual Tx work is minimal, we can give the Tx a larger - * budget and be more aggressive about cleaning up the Tx descriptors. */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - for (i = 0; i < q_vector->num_ringpairs; i++) { - clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i], - vsi->work_limit); - clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i], - budget_per_ring); - } + + i40e_for_each_ring(ring, q_vector->rx) + clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); /* If work not completed, return budget and polling will return */ if (!clean_complete) @@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) qval = rd32(hw, I40E_QINT_TQCTL(0)); qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(0), qval); - i40e_flush(hw); + + i40e_irq_dynamic_enable_icr0(vsi->back); } } @@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; + u16 i; /* make sure ATR is enabled */ if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) @@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->atr_count = 0; /* grab the next descriptor */ - fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); - tx_ring->next_to_use++; - if (tx_ring->next_to_use == tx_ring->count) - tx_ring->next_to_use = 0; + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
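
The netdev_tx_completed_queue() call added above is one leg of Byte Queue Limits accounting; it has to stay balanced with netdev_tx_sent_queue() on the transmit path and netdev_tx_reset_queue() when the ring is flushed, both of which this patch also adds. The three calls, reduced to a sketch around a hypothetical queue index:

	#include <linux/netdevice.h>

	static void bql_sent(struct net_device *dev, u16 qidx,
			     unsigned int bytes)
	{
		netdev_tx_sent_queue(netdev_get_tx_queue(dev, qidx), bytes);
	}

	static void bql_completed(struct net_device *dev, u16 qidx,
				  unsigned int pkts, unsigned int bytes)
	{
		netdev_tx_completed_queue(netdev_get_tx_queue(dev, qidx),
					  pkts, bytes);
	}

	static void bql_flush(struct net_device *dev, u16 qidx)
	{
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qidx));
	}

If sent and completed byte counts ever drift apart, BQL either stalls the queue or stops limiting it, which is why the clean-ring path resets the accounting.
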
i : 0; flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; @@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); } -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer @@ -1276,27 +1303,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, } /** - * i40e_tx_csum - is checksum offload requested - * @tx_ring: ptr to the ring to send - * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol - * - * Returns true if checksum offload is requested - **/ -static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) -{ - if ((skb->ip_summed != CHECKSUM_PARTIAL) && - !(tx_flags & I40E_TX_FLAGS_TXSW)) { - if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN)) - return false; - } - - return skb->ip_summed == CHECKSUM_PARTIAL; -} - -/** * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending @@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, const u32 cd_tunneling, const u32 cd_l2tag2) { struct i40e_tx_context_desc *context_desc; + int i = tx_ring->next_to_use; if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ - context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); - tx_ring->next_to_use++; - if (tx_ring->next_to_use == tx_ring->count) - tx_ring->next_to_use = 0; + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; /* cpu_to_le32 and assign to struct fields */ context_desc->tunneling_params = cpu_to_le32(cd_tunneling); @@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct device *dev = tx_ring->dev; - u32 paylen = skb->len - hdr_len; - u16 i = tx_ring->next_to_use; + struct skb_frag_struct *frag; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; - u32 buf_offset = 0; + u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; u16 gso_segs; - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) - goto dma_error; - if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> I40E_TX_FLAGS_VLAN_SHIFT; } + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + gso_segs = skb_shinfo(skb)->gso_segs; + else + gso_segs = 1; + + /* multiply data chunks by size of headers */ + first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); + first->gso_segs = gso_segs; + first->skb = skb; + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + tx_desc = I40E_TX_DESC(tx_ring, i); - for (;;) { - while (size > I40E_MAX_DATA_PER_TXD) { - tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, I40E_MAX_DATA_PER_TXD, td_tag); - buf_offset += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; - tx_desc++; i++; if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); i = 0; } - } - tx_bi = &tx_ring->tx_bi[i]; - tx_bi->length = buf_offset + size; - tx_bi->tx_flags = tx_flags; - tx_bi->dma = dma; + dma += I40E_MAX_DATA_PER_TXD; + size -= I40E_MAX_DATA_PER_TXD; - tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); - tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - size, td_tag); + tx_desc->buffer_addr = cpu_to_le64(dma); + } if (likely(!data_len)) break; - size = skb_frag_size(frag); - data_len -= size; - buf_offset = 0; - tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE; - - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) - goto dma_error; + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); tx_desc++; i++; @@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - frag++; - } - - tx_desc->cmd_type_offset_bsz |= - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + size = skb_frag_size(frag); + data_len -= size; - i++; - if (i == tx_ring->count) - i = 0; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); - tx_ring->next_to_use = i; + tx_bi = &tx_ring->tx_bi[i]; + } - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) - gso_segs = skb_shinfo(skb)->gso_segs; - else - gso_segs = 1; + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << 
I40E_TXD_QW1_CMD_SHIFT); - /* multiply data chunks by size of headers */ - tx_bi->bytecount = paylen + (gso_segs * hdr_len); - tx_bi->gso_segs = gso_segs; - tx_bi->skb = skb; + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); - /* set the timestamp and next to watch values */ + /* set the timestamp */ first->time_stamp = jiffies; - first->next_to_watch = tx_desc; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only @@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, */ wmb(); + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ writel(i, tx_ring->tail); + return; dma_error: - dev_info(dev, "TX DMA map failed\n"); + dev_info(tx_ring->dev, "TX DMA map failed\n"); /* clear dma mappings for failed tx_bi map */ for (;;) { tx_bi = &tx_ring->tx_bi[i]; - i40e_unmap_tx_resource(tx_ring, tx_bi); + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); if (tx_bi == first) break; if (i == 0) @@ -1632,8 +1647,6 @@ dma_error: i--; } - dev_kfree_skb_any(skb); - tx_ring->next_to_use = i; } @@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + /* Always offload the checksum, since it's in the data descriptor */ - if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol)) + if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - /* always enable offload insertion */ - td_cmd |= I40E_TX_DESC_CMD_ICRC; - - if (tx_flags & I40E_TX_FLAGS_CSUM) i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); + } i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); @@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping]; + struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; /* hardware can't handle really short frames, hardware padding works * beyond this point diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index b1d7722d98a..db55d9947f1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -102,23 +102,20 @@ #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_TXSW (u32)(1 << 8) -#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define I40E_TX_FLAGS_VLAN_SHIFT 16 struct i40e_tx_buffer { - struct sk_buff *skb; - dma_addr_t dma; - unsigned long time_stamp; - u16 length; - u32 tx_flags; struct i40e_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; unsigned int bytecount; - u16 gso_segs; - u8 mapped_as_page; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; }; struct i40e_rx_buffer { @@ -129,18 +126,18 @@ struct i40e_rx_buffer { unsigned int page_offset; }; -struct i40e_tx_queue_stats { +struct i40e_queue_stats { u64 packets; u64 bytes; 
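
The stats hunks above move packets/bytes into a shared stats struct guarded by a u64_stats_sync, so 64-bit counters cannot be torn on 32-bit hosts while NAPI is mid-update. The writer/reader discipline, sketched with a stand-in struct qstats:

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct qstats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	/* writer: single per-queue updater, e.g. the NAPI poll loop */
	static void qstats_add(struct qstats *s, u64 pkts, u64 bytes)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets += pkts;
		s->bytes += bytes;
		u64_stats_update_end(&s->syncp);
	}

	/* reader: retry if a writer was mid-update (a no-op on 64-bit) */
	static void qstats_read(struct qstats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}
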
+}; + +struct i40e_tx_queue_stats { u64 restart_queue; u64 tx_busy; - u64 completed; u64 tx_done_old; }; struct i40e_rx_queue_stats { - u64 packets; - u64 bytes; u64 non_eop_descs; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; @@ -183,6 +180,7 @@ enum i40e_ring_state_t { /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ @@ -219,6 +217,8 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; union { struct i40e_tx_queue_stats tx_stats; struct i40e_rx_queue_stats rx_stats; @@ -229,6 +229,8 @@ struct i40e_ring { struct i40e_vsi *vsi; /* Backreference to associated VSI */ struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -238,9 +240,8 @@ enum i40e_latency_range { }; struct i40e_ring_container { -#define I40E_MAX_RINGPAIR_PER_VECTOR 8 /* array of pointers to rings */ - struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR]; + struct i40e_ring *ring; unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ u16 count; @@ -248,6 +249,10 @@ struct i40e_ring_container { u16 itr; }; +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 8967e58e240..07596982a47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); else reg_idx = I40E_VPINT_LNKLSTN( - ((pf->hw.func_caps.num_msix_vectors_vf - 1) + (pf->hw.func_caps.num_msix_vectors_vf * vf->vf_id) + (vector_id - 1)); if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { @@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, /* associate this queue with the PCI VF function */ qtx_ctl = I40E_QTX_CTL_VF_QUEUE; - qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 74a1506b423..8c2437722aa 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -28,14 +28,14 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ -extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw); -extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw); -extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); -extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 
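
With the ring container above reduced from an eight-entry array to a single list head, rings are chained through the new ring->next field and walked with the i40e_for_each_ring() iterator. A hypothetical push helper shows the shape of the list the driver builds when it maps vectors to queue pairs (the driver open-codes the equivalent rather than using a helper like this):

	static void container_add_ring(struct i40e_ring_container *head,
				       struct i40e_ring *ring)
	{
		ring->next = head->ring;	/* push onto the singly-linked list */
		head->ring = ring;
		head->count++;
	}

i40e_for_each_ring(ring, q_vector->tx) then visits every ring pushed this way, most recently added first.
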
byte_offset, - u8 dev_addr, u8 *data); -extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data); +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_DEF1_DEF2 << 8) | \ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 37a9c06a6c6..2e166b22d52 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -562,11 +562,11 @@ struct e1000_hw { u8 revision_id; }; -extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); #define hw_dbg(format, arg...) \ netdev_dbg(igb_get_hw_dev(hw), format, ##arg) /* These functions must be implemented by drivers */ -s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); -s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); #endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index dde3c4b7ea9..2d913716573 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -28,26 +28,24 @@ #ifndef _E1000_I210_H_ #define _E1000_I210_H_ -extern s32 igb_update_flash_i210(struct e1000_hw *hw); -extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw); -extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw); -extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); -extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, - u16 words, u16 *data); -extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); -extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); -extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw); -extern void igb_release_nvm_i210(struct e1000_hw *hw); -extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); -extern s32 igb_read_invm_version(struct e1000_hw *hw, - struct e1000_fw_version *invm_ver); -extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, - u16 *data); -extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, - u16 data); -extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw); -extern bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_update_flash_i210(struct e1000_hw *hw); +s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_acquire_nvm_i210(struct e1000_hw *hw); +void igb_release_nvm_i210(struct e1000_hw *hw); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + 
struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 5e13e83cc60..e4cbe8ef67b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -86,6 +86,6 @@ enum e1000_mng_mode { #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 -extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +void e1000_init_function_pointers_82575(struct e1000_hw *hw); #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index e7266759a10..ad2b74d9513 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -708,11 +708,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) hw_dbg("Error committing the PHY changes\n"); goto out; } - if (phy->type == e1000_phy_i210) { - ret_val = igb_set_master_slave_mode(hw); - if (ret_val) - return ret_val; - } out: return ret_val; @@ -806,6 +801,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) hw_dbg("Error committing the PHY changes\n"); return ret_val; } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; return 0; } @@ -1730,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ - udelay(usec_interval); + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 6807b098eda..5e9ed89403a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -483,40 +483,38 @@ enum igb_boards { extern char igb_driver_name[]; extern char igb_driver_version[]; -extern int igb_up(struct igb_adapter *); -extern void igb_down(struct igb_adapter *); -extern void igb_reinit_locked(struct igb_adapter *); -extern void igb_reset(struct igb_adapter *); -extern void igb_write_rss_indir_tbl(struct igb_adapter *); -extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8); -extern int igb_setup_tx_resources(struct igb_ring *); -extern int igb_setup_rx_resources(struct igb_ring *); -extern void igb_free_tx_resources(struct igb_ring *); -extern void igb_free_rx_resources(struct igb_ring *); -extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); -extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); -extern void igb_setup_tctl(struct igb_adapter *); -extern void igb_setup_rctl(struct igb_adapter *); -extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); -extern void igb_unmap_and_free_tx_resource(struct igb_ring *, - struct igb_tx_buffer *); -extern void igb_alloc_rx_buffers(struct igb_ring *, u16); -extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); -extern bool igb_has_link(struct igb_adapter *adapter); -extern void igb_set_ethtool_ops(struct net_device *); -extern void igb_power_up_link(struct igb_adapter *); -extern void 
igb_set_fw_version(struct igb_adapter *); -extern void igb_ptp_init(struct igb_adapter *adapter); -extern void igb_ptp_stop(struct igb_adapter *adapter); -extern void igb_ptp_reset(struct igb_adapter *adapter); -extern void igb_ptp_tx_work(struct work_struct *work); -extern void igb_ptp_rx_hang(struct igb_adapter *adapter); -extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); -extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, - struct sk_buff *skb); -extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, - unsigned char *va, - struct sk_buff *skb); +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_tx_work(struct work_struct *work); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, + struct sk_buff *skb); static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd); +int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); #ifdef CONFIG_IGB_HWMON -extern void igb_sysfs_exit(struct igb_adapter *adapter); -extern int igb_sysfs_init(struct igb_adapter *adapter); +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); #endif static inline s32 igb_reset_phy(struct e1000_hw *hw) { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 86d51429a18..c3143da497c 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -146,6 +146,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; u32 status; + status = rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { ecmd->supported = (SUPPORTED_10baseT_Half | @@ -169,13 
+170,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->transceiver = XCVR_INTERNAL; } else { ecmd->supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg | SUPPORTED_Pause); - ecmd->advertising = ADVERTISED_FIBRE; - - if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) { - ecmd->supported |= SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_1000baseT_Full; + ecmd->advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->supported &= + ~SUPPORTED_1000baseKX_Full; + ecmd->advertising |= ADVERTISED_2500baseX_Full; + ecmd->advertising &= + ~ADVERTISED_1000baseKX_Full; + } } if (eth_flags->e100_base_fx) { ecmd->supported |= SUPPORTED_100baseT_Full; @@ -187,35 +197,29 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->port = PORT_FIBRE; ecmd->transceiver = XCVR_EXTERNAL; } - if (hw->mac.autoneg != 1) ecmd->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (hw->fc.requested_mode == e1000_fc_full) + switch (hw->fc.requested_mode) { + case e1000_fc_full: ecmd->advertising |= ADVERTISED_Pause; - else if (hw->fc.requested_mode == e1000_fc_rx_pause) + break; + case e1000_fc_rx_pause: ecmd->advertising |= (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - else if (hw->fc.requested_mode == e1000_fc_tx_pause) + break; + case e1000_fc_tx_pause: ecmd->advertising |= ADVERTISED_Asym_Pause; - else + break; + default: ecmd->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - status = rd32(E1000_STATUS); - + } if (status & E1000_STATUS_LU) { - if (hw->mac.type == e1000_i354) { - if ((status & E1000_STATUS_2P5_SKU) && - !(status & E1000_STATUS_2P5_SKU_OVER)) { - ecmd->supported = SUPPORTED_2500baseX_Full; - ecmd->advertising = ADVERTISED_2500baseX_Full; - ecmd->speed = SPEED_2500; - } else { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; - } + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + ecmd->speed = SPEED_2500; } else if (status & E1000_STATUS_SPEED_1000) { ecmd->speed = SPEED_1000; } else if (status & E1000_STATUS_SPEED_100) { @@ -232,7 +236,6 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->speed = -1; ecmd->duplex = -1; } - if ((hw->phy.media_type == e1000_media_type_fiber) || hw->mac.autoneg) ecmd->autoneg = AUTONEG_ENABLE; @@ -771,8 +774,10 @@ static int igb_set_eeprom(struct net_device *netdev, if (eeprom->len == 0) return -EOPNOTSUPP; - if (hw->mac.type == e1000_i211) + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { return -EOPNOTSUPP; + } if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EFAULT; @@ -1659,7 +1664,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { /* Enable DH89xxCC MPHY for near end loopback */ reg = rd32(E1000_MPHY_ADDR_CTL); @@ -1725,7 +1731,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter) if ((hw->device_id == 
E1000_DEV_ID_DH89XXCC_SGMII) || (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || - (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) { + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { u32 reg; /* Disable near end loopback on DH89xxCC */ @@ -2055,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct igb_adapter *adapter = netdev_priv(netdev); - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC | - WAKE_PHY; wol->wolopts = 0; if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) return; + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + /* apply any specific unsupported masks here */ switch (adapter->hw.device_id) { default: @@ -2655,6 +2663,8 @@ static int igb_set_eee(struct net_device *netdev, (hw->phy.media_type != e1000_media_type_copper)) return -EOPNOTSUPP; + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + ret_val = igb_get_eee(netdev, &eee_curr); if (ret_val) return ret_val; @@ -2875,6 +2885,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir) return 0; } +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned int max_combined = 0; + + switch (hw->mac.type) { + case e1000_i211: + max_combined = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: + max_combined = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + if (!!adapter->vfs_allocated_count) { + max_combined = 1; + break; + } + /* fall through */ + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_combined = 2; + break; + } + /* fall through */ + case e1000_82580: + case e1000_i354: + default: + max_combined = IGB_MAX_RX_QUEUES; + break; + } + + return max_combined; +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->msix_entries) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + if (count > igb_max_channels(adapter)) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2911,6 +3003,8 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_rxfh_indir_size = igb_get_rxfh_indir_size, .get_rxfh_indir = igb_get_rxfh_indir, .set_rxfh_indir = igb_set_rxfh_indir, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, }; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8cf44f2a8cc..025e5f4b748 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -182,6 +182,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); #endif #ifdef CONFIG_PM @@ -1223,6 +1224,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -1256,6 +1260,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; + u64_stats_init(&ring->rx_syncp); + /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; } @@ -2034,21 +2040,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } } @@ -2429,7 +2429,7 @@ err_dma: } #ifdef CONFIG_PCI_IOV -static int igb_disable_sriov(struct pci_dev *pdev) +static int igb_disable_sriov(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -2470,27 +2470,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) int err = 0; int i; - if (!adapter->msix_entries) { + if (!adapter->msix_entries || num_vfs > 7) { err = -EPERM; goto out; } - if (!num_vfs) goto out; - else if (old_vfs && old_vfs == num_vfs) - goto out; - else if (old_vfs && old_vfs != num_vfs) - err = igb_disable_sriov(pdev); - if (err) - goto out; - - if (num_vfs > 7) { - err = -EPERM; - goto out; - } - - adapter->vfs_allocated_count = num_vfs; + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), GFP_KERNEL); @@ -2504,10 +2496,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) goto out; } - err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); - if (err) 
- goto err_out; - + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } dev_info(&pdev->dev, "%d VFs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) @@ -2623,7 +2617,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) return; pci_sriov_set_totalvfs(pdev, 7); - igb_enable_sriov(pdev, max_vfs); + igb_pci_enable_sriov(pdev, max_vfs); #endif /* CONFIG_PCI_IOV */ } @@ -5708,7 +5702,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, 6); + memcpy(addr, vf_mac, ETH_ALEN); igb_write_mbx(hw, msgbuf, 3, vf); } @@ -7838,4 +7832,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, return E1000_SUCCESS; } + +int igb_reinit_queues(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igb_close(netdev); + + igb_clear_interrupt_scheme(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igb_open(netdev); + + return err; +} /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index a1463e3d14c..7d6a25c8f88 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -312,17 +312,17 @@ enum igbvf_state_t { extern char igbvf_driver_name[]; extern const char igbvf_driver_version[]; -extern void igbvf_check_options(struct igbvf_adapter *); -extern void igbvf_set_ethtool_ops(struct net_device *); - -extern int igbvf_up(struct igbvf_adapter *); -extern void igbvf_down(struct igbvf_adapter *); -extern void igbvf_reinit_locked(struct igbvf_adapter *); -extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); -extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); -extern void igbvf_free_rx_resources(struct igbvf_ring *); -extern void igbvf_free_tx_resources(struct igbvf_ring *); -extern void igbvf_update_stats(struct igbvf_adapter *); +void igbvf_check_options(struct igbvf_adapter *); +void igbvf_set_ethtool_ops(struct net_device *); + +int igbvf_up(struct igbvf_adapter *); +void igbvf_down(struct igbvf_adapter *); +void igbvf_reinit_locked(struct igbvf_adapter *); +int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); +int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); +void igbvf_free_rx_resources(struct igbvf_ring *); +void igbvf_free_tx_resources(struct igbvf_ring *); +void igbvf_update_stats(struct igbvf_adapter *); extern unsigned int copybreak; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 93eb7ee06d3..04bf22e5ee3 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2343,10 +2343,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) struct igbvf_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); + if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - 
ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) return -EINVAL; - } #define MAX_STD_JUMBO_FRAME_SIZE 9234 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { @@ -2638,21 +2637,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; } } @@ -2699,7 +2692,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ei->get_variants) { err = ei->get_variants(adapter); if (err) - goto err_ioremap; + goto err_get_variants; } /* setup adapter struct */ @@ -2796,6 +2789,7 @@ err_hw_init: kfree(adapter->rx_ring); err_sw_init: igbvf_reset_interrupt_capability(adapter); +err_get_variants: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index eea0e10ce12..955ad8c2c53 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) - memcpy(hw->mac.perm_addr, addr, 6); + memcpy(hw->mac.perm_addr, addr, ETH_ALEN); else ret_val = -E1000_ERR_MAC_INIT; } @@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) memset(msgbuf, 0, 12); msgbuf[0] = E1000_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, 6); + memcpy(msg_addr, addr, ETH_ALEN); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index 4d2ae97ff1b..2224cc2edf1 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -187,21 +187,21 @@ enum ixgb_state_t { }; /* Exported from other modules */ -extern void ixgb_check_options(struct ixgb_adapter *adapter); -extern void ixgb_set_ethtool_ops(struct net_device *netdev); +void ixgb_check_options(struct ixgb_adapter *adapter); +void ixgb_set_ethtool_ops(struct net_device *netdev); extern char ixgb_driver_name[]; extern const char ixgb_driver_version[]; -extern void ixgb_set_speed_duplex(struct net_device *netdev); +void ixgb_set_speed_duplex(struct net_device *netdev); -extern int ixgb_up(struct ixgb_adapter *adapter); -extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); -extern void ixgb_reset(struct ixgb_adapter *adapter); -extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); -extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); -extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter); -extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter); -extern void ixgb_update_stats(struct ixgb_adapter *adapter); +int ixgb_up(struct ixgb_adapter *adapter); +void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); +void ixgb_reset(struct 
ixgb_adapter *adapter); +int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); +int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); +void ixgb_free_rx_resources(struct ixgb_adapter *adapter); +void ixgb_free_tx_resources(struct ixgb_adapter *adapter); +void ixgb_update_stats(struct ixgb_adapter *adapter); #endif /* _IXGB_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 2a99a35c33a..0bd5d72e1af 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -759,27 +759,20 @@ struct ixgb_hw_stats { }; /* Function Prototypes */ -extern bool ixgb_adapter_stop(struct ixgb_hw *hw); -extern bool ixgb_init_hw(struct ixgb_hw *hw); -extern bool ixgb_adapter_start(struct ixgb_hw *hw); -extern void ixgb_check_for_link(struct ixgb_hw *hw); -extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw); - -extern void ixgb_rar_set(struct ixgb_hw *hw, - u8 *addr, - u32 index); +bool ixgb_adapter_stop(struct ixgb_hw *hw); +bool ixgb_init_hw(struct ixgb_hw *hw); +bool ixgb_adapter_start(struct ixgb_hw *hw); +void ixgb_check_for_link(struct ixgb_hw *hw); +bool ixgb_check_for_bad_link(struct ixgb_hw *hw); +void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index); /* Filters (multicast, vlan, receive) */ -extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, - u8 *mc_addr_list, - u32 mc_addr_count, - u32 pad); +void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad); /* Vfta functions */ -extern void ixgb_write_vfta(struct ixgb_hw *hw, - u32 offset, - u32 value); +void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value); /* Access functions to eeprom data */ void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 9f6b236828e..57e390cbe6d 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - pr_err("No usable DMA configuration, aborting\n"); - goto err_dma_mask; - } + pr_err("No usable DMA configuration, aborting\n"); + goto err_dma_mask; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 0ac6b11c6e4..f38fc0a343a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -55,7 +55,7 @@ #include <net/busy_poll.h> #ifdef CONFIG_NET_RX_BUSY_POLL -#define LL_EXTENDED_STATS +#define BP_EXTENDED_STATS #endif /* common prefix used by pr_<> macros */ #undef pr_fmt @@ -67,7 +67,11 @@ #define IXGBE_MAX_TXD 4096 #define IXGBE_MIN_TXD 64 +#if (PAGE_SIZE < 8192) #define IXGBE_DEFAULT_RXD 512 +#else +#define IXGBE_DEFAULT_RXD 128 +#endif #define IXGBE_MAX_RXD 4096 #define IXGBE_MIN_RXD 64 @@ -187,11 +191,11 @@ struct ixgbe_rx_buffer { struct ixgbe_queue_stats { u64 packets; u64 bytes; -#ifdef LL_EXTENDED_STATS +#ifdef 
BP_EXTENDED_STATS u64 yields; u64 misses; u64 cleaned; -#endif /* LL_EXTENDED_STATS */ +#endif /* BP_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -219,6 +223,15 @@ enum ixgbe_ring_state_t { __IXGBE_RX_FCOE, }; +struct ixgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *netdev; + struct ixgbe_adapter *real_adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int pool; +}; + #define check_for_tx_hang(ring) \ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define set_check_for_tx_hang(ring) \ @@ -236,6 +249,7 @@ struct ixgbe_ring { struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ struct device *dev; /* device for DMA mapping */ + struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ union { struct ixgbe_tx_buffer *tx_buffer_info; @@ -293,6 +307,12 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 + struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -369,11 +389,13 @@ struct ixgbe_q_vector { #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int state; #define IXGBE_QV_STATE_IDLE 0 -#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ -#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ -#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) -#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */ -#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */ +#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */ +#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) +#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED) +#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) spinlock_t lock; @@ -394,18 +416,18 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) { int rc = true; - spin_lock(&q_vector->lock); + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBE_QV_LOCKED) { WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; rc = false; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS q_vector->tx.ring->stats.yields++; #endif } else /* we don't care if someone yielded */ q_vector->state = IXGBE_QV_STATE_NAPI; - spin_unlock(&q_vector->lock); + spin_unlock_bh(&q_vector->lock); return rc; } @@ -413,14 +435,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) { int rc = false; - spin_lock(&q_vector->lock); + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_NAPI_YIELD)); if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) rc = true; - q_vector->state = IXGBE_QV_STATE_IDLE; - spin_unlock(&q_vector->lock); + /* will reset state to idle, 
unless QV is disabled */ + q_vector->state &= IXGBE_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); return rc; } @@ -432,7 +455,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) if ((q_vector->state & IXGBE_QV_LOCKED)) { q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; rc = false; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS q_vector->rx.ring->stats.yields++; #endif } else @@ -451,17 +474,32 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) rc = true; - q_vector->state = IXGBE_QV_STATE_IDLE; + /* will reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBE_QV_STATE_DISABLED; spin_unlock_bh(&q_vector->lock); return rc; } /* true if a socket is polling, even if it did not get the lock */ -static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) { - WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); + WARN_ON(!(q_vector->state & IXGBE_QV_OWNED)); return q_vector->state & IXGBE_QV_USER_PEND; } + +/* false if QV is currently owned */ +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBE_QV_OWNED) + rc = false; + q_vector->state |= IXGBE_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + + return rc; +} + #else /* CONFIG_NET_RX_BUSY_POLL */ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) { @@ -487,10 +525,16 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) return false; } -static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) { return false; } + +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + return true; +} + #endif /* CONFIG_NET_RX_BUSY_POLL */ #ifdef CONFIG_IXGBE_HWMON @@ -738,6 +782,7 @@ struct ixgbe_adapter { #endif /*CONFIG_DEBUG_FS*/ u8 default_up; + unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ }; struct ixgbe_fdir_filter { @@ -786,93 +831,89 @@ extern const char ixgbe_driver_version[]; extern char ixgbe_default_device_descr[]; #endif /* IXGBE_FCOE */ -extern void ixgbe_up(struct ixgbe_adapter *adapter); -extern void ixgbe_down(struct ixgbe_adapter *adapter); -extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); -extern void ixgbe_reset(struct ixgbe_adapter *adapter); -extern void ixgbe_set_ethtool_ops(struct net_device *netdev); -extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); -extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); -extern void ixgbe_free_rx_resources(struct ixgbe_ring *); -extern void ixgbe_free_tx_resources(struct ixgbe_ring *); -extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); -extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); -extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *); -extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); -extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, +void ixgbe_up(struct ixgbe_adapter *adapter); +void ixgbe_down(struct ixgbe_adapter *adapter); +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); +void ixgbe_reset(struct ixgbe_adapter *adapter); +void ixgbe_set_ethtool_ops(struct net_device *netdev); 
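[Editor's note] The ixgbe_qv_* rework above closes a teardown race: the unlock paths no longer reset the state to IXGBE_QV_STATE_IDLE outright but mask with IXGBE_QV_STATE_DISABLED, so a disable request that arrives while NAPI or busy-poll owns the vector survives the unlock (and the locking moves to the _bh spinlock variants, since busy-poll runs in process context). A minimal userspace model of that state machine follows; the names mirror the driver, but the real spin_lock_bh() protection and the yield/statistics accounting are elided, so this is an illustrative sketch only:

	#include <stdbool.h>
	#include <stdio.h>

	#define QV_STATE_IDLE      0
	#define QV_STATE_NAPI      1   /* NAPI owns this QV */
	#define QV_STATE_POLL      2   /* busy-poll owns this QV */
	#define QV_STATE_DISABLED  4   /* QV is being torn down */
	#define QV_OWNED  (QV_STATE_NAPI | QV_STATE_POLL)
	#define QV_LOCKED (QV_OWNED | QV_STATE_DISABLED)

	static unsigned int state = QV_STATE_IDLE;

	static bool qv_lock_napi(void)
	{
		if (state & QV_LOCKED)
			return false;          /* owned or disabled: caller yields */
		state = QV_STATE_NAPI;
		return true;
	}

	static void qv_unlock_napi(void)
	{
		/* reset to idle *unless* a disable raced in while NAPI owned it */
		state &= QV_STATE_DISABLED;
	}

	static bool qv_disable(void)
	{
		bool rc = !(state & QV_OWNED); /* false while NAPI/poll own it */

		state |= QV_STATE_DISABLED;    /* sticky across the next unlock */
		return rc;
	}

	int main(void)
	{
		qv_lock_napi();                /* NAPI takes the vector */
		qv_disable();                  /* teardown requests a disable */
		qv_unlock_napi();
		printf("state = %u\n", state); /* prints 4: DISABLED survived */
		return 0;
	}

This is the contract ixgbe_napi_disable_all() relies on later in this patch, where it spins on ixgbe_qv_disable() with usleep_range() until the current owner drops the vector.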
+int ixgbe_setup_rx_resources(struct ixgbe_ring *); +int ixgbe_setup_tx_resources(struct ixgbe_ring *); +void ixgbe_free_rx_resources(struct ixgbe_ring *); +void ixgbe_free_tx_resources(struct ixgbe_ring *); +void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void ixgbe_update_stats(struct ixgbe_adapter *adapter); +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, u16 subdevice_id); -extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, - struct ixgbe_adapter *, - struct ixgbe_ring *); -extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); -extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); -extern void ixgbe_write_eitr(struct ixgbe_q_vector *); -extern int ixgbe_poll(struct napi_struct *napi, int budget); -extern int ethtool_ioctl(struct ifreq *ifr); -extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); -extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_hash_dword input, - union ixgbe_atr_hash_dword common, - u8 queue); -extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask); -extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id, u8 queue); -extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input, - u16 soft_id); -extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, - union ixgbe_atr_input *mask); -extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); -extern void ixgbe_set_rx_mode(struct net_device *netdev); +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, + struct ixgbe_ring *); +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, + struct ixgbe_tx_buffer *); +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); +void ixgbe_write_eitr(struct ixgbe_q_vector *); +int ixgbe_poll(struct napi_struct *napi, int budget); +int ethtool_ioctl(struct ifreq *ifr); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB -extern void 
ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); #endif -extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); -extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); -extern void ixgbe_do_reset(struct net_device *netdev); +int ixgbe_setup_tc(struct net_device *dev, u8 tc); +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); +void ixgbe_do_reset(struct net_device *netdev); #ifdef CONFIG_IXGBE_HWMON -extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); -extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); #endif /* CONFIG_IXGBE_HWMON */ #ifdef IXGBE_FCOE -extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -extern int ixgbe_fso(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u8 *hdr_len); -extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb); -extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc); -extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); -extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); -extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); -extern int ixgbe_fcoe_enable(struct net_device *netdev); -extern int ixgbe_fcoe_disable(struct net_device *netdev); +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, + u8 *hdr_len); +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +int ixgbe_fcoe_enable(struct net_device *netdev); +int ixgbe_fcoe_disable(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB -extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); -extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); +u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); #endif /* CONFIG_IXGBE_DCB */ -extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); -extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, - struct netdev_fcoe_hbainfo *info); -extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info); +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); #endif /* IXGBE_FCOE */ #ifdef CONFIG_DEBUG_FS -extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); -extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); -extern void ixgbe_dbg_init(void); -extern void ixgbe_dbg_exit(void); +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); +void 
ixgbe_dbg_init(void); +void ixgbe_dbg_exit(void); #else static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} @@ -884,12 +925,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) return netdev_get_tx_queue(ring->netdev, ring->queue_index); } -extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); +void ixgbe_ptp_init(struct ixgbe_adapter *adapter); +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -906,13 +947,16 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, rx_ring->last_rx_timestamp = jiffies; } -extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, - struct ifreq *ifr, int cmd); -extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); -extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, + int cmd); +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); #endif +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e8649abf97c..4e7c9b098b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) static int ixgbe_get_regs_len(struct net_device *netdev) { -#define IXGBE_REGS_LEN 1129 +#define IXGBE_REGS_LEN 1139 return IXGBE_REGS_LEN * sizeof(u32); } @@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); /* DCB */ - regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); - regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); - regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); - regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); - for (i = 0; i < 8; i++) - regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); - for (i = 0; i < 8; i++) - regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); - for (i = 0; i < 8; i++) - regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); - for (i = 0; i < 8; i++) - regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[830] = IXGBE_READ_REG(hw, 
IXGBE_DPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); + break; + default: + break; + } + for (i = 0; i < 8; i++) - regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); + regs_buff[865 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ for (i = 0; i < 8; i++) - regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); + regs_buff[873 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ /* Statistics */ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); @@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev, /* 82599 X540 specific registers */ regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); + + /* 82599 X540 specific DCB registers */ + regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); + for (i = 0; i < 4; i++) + regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); + regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); + /* same as RTTQCNRM */ + regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); + /* same as RTTQCNRR */ + + /* X540 specific DCB registers */ + regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); + regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); } static int ixgbe_get_eeprom_len(struct net_device *netdev) @@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS data[i] = 0; data[i+1] = 0; data[i+2] = 0; @@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; data[i+1] = ring->stats.misses; data[i+2] = ring->stats.cleaned; @@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS data[i] = 0; data[i+1] = 0; data[i+2] = 0; @@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; data[i+1] = ring->stats.misses; data[i+2] = ring->stats.cleaned; @@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef LL_EXTENDED_STATS - sprintf(p, "tx_queue_%u_ll_napi_yield", i); +#ifdef BP_EXTENDED_STATS + sprintf(p, 
"tx_queue_%u_bp_napi_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_ll_misses", i); + sprintf(p, "tx_queue_%u_bp_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_ll_cleaned", i); + sprintf(p, "tx_queue_%u_bp_cleaned", i); p += ETH_GSTRING_LEN; -#endif /* LL_EXTENDED_STATS */ +#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; -#ifdef LL_EXTENDED_STATS - sprintf(p, "rx_queue_%u_ll_poll_yield", i); +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_ll_misses", i); + sprintf(p, "rx_queue_%u_bp_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_ll_cleaned", i); + sprintf(p, "rx_queue_%u_bp_cleaned", i); p += ETH_GSTRING_LEN; -#endif /* LL_EXTENDED_STATS */ +#endif /* BP_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); @@ -2212,13 +2257,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev, #if IS_ENABLED(CONFIG_BQL) /* detect ITR changes that require update of TXDCTL.WTHRESH */ - if ((adapter->tx_itr_setting > 1) && + if ((adapter->tx_itr_setting != 1) && (adapter->tx_itr_setting < IXGBE_100K_ITR)) { if ((tx_itr_prev == 1) || - (tx_itr_prev > IXGBE_100K_ITR)) + (tx_itr_prev >= IXGBE_100K_ITR)) need_reset = true; } else { - if ((tx_itr_prev > 1) && + if ((tx_itr_prev != 1) && (tx_itr_prev < IXGBE_100K_ITR)) need_reset = true; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 90b4e1089ec..32e3eaaa160 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -498,6 +498,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif + bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); /* only proceed if SR-IOV is enabled */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) @@ -510,7 +511,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); /* 64 pool mode with 2 queues per pool */ - if ((vmdq_i > 32) || (rss_i < 4)) { + if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; rss_m = IXGBE_RSS_2Q_MASK; rss_i = min_t(u16, rss_i, 2); @@ -852,7 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* apply Tx specific ring traits */ ring->count = adapter->tx_ring_count; - ring->queue_index = txr_idx; + if (adapter->num_rx_pools > 1) + ring->queue_index = + txr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = txr_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -895,7 +900,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, #endif /* IXGBE_FCOE */ /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; - ring->queue_index = rxr_idx; + if (adapter->num_rx_pools > 1) + ring->queue_index = + rxr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0ade0cd5ef5..cc06854296a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -44,6 +44,7 @@ #include <linux/ethtool.h> 
#include <linux/if.h> #include <linux/if_vlan.h> +#include <linux/if_macvlan.h> #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> @@ -132,7 +133,7 @@ static struct notifier_block dca_notifier = { static unsigned int max_vfs; module_param(max_vfs, uint, 0); MODULE_PARM_DESC(max_vfs, - "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63"); + "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); #endif /* CONFIG_PCI_IOV */ static unsigned int allow_unsupported_sfp; @@ -153,7 +154,6 @@ MODULE_VERSION(DRV_VERSION); static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) { - int pos = 0; struct pci_dev *parent_dev; struct pci_bus *parent_bus; @@ -165,11 +165,10 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, if (!parent_dev) return -1; - pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP); - if (!pos) + if (!pci_is_pcie(parent_dev)) return -1; - pci_read_config_word(parent_dev, pos + reg, value); + pcie_capability_read_word(parent_dev, reg, value); return 0; } @@ -247,7 +246,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, max_gts = 4 * width; break; case PCIE_SPEED_8_0GT: - /* 128b/130b encoding only reduces throughput by 1% */ + /* 128b/130b encoding reduces throughput by less than 2% */ max_gts = 8 * width; break; default: @@ -265,7 +264,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, width, (speed == PCIE_SPEED_2_5GT ? "20%" : speed == PCIE_SPEED_5_0GT ? "20%" : - speed == PCIE_SPEED_8_0GT ? "N/a" : + speed == PCIE_SPEED_8_0GT ? "<2%" : "Unknown")); if (max_gts < expected_gts) { @@ -872,11 +871,18 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) { - struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); - struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u32 head, tail; - u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + if (ring->l2_accel_priv) + adapter = ring->l2_accel_priv->real_adapter; + else + adapter = netdev_priv(ring->netdev); + + hw = &adapter->hw; + head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); if (head != tail) return (head < tail) ? 
@@ -1585,7 +1591,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, { struct ixgbe_adapter *adapter = q_vector->adapter; - if (ixgbe_qv_ll_polling(q_vector)) + if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) napi_gro_receive(&q_vector->napi, skb); @@ -2097,7 +2103,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) ixgbe_for_each_ring(ring, q_vector->rx) { found = ixgbe_clean_rx_irq(q_vector, ring, 4); -#ifdef LL_EXTENDED_STATS +#ifdef BP_EXTENDED_STATS if (found) ring->stats.cleaned += found; else @@ -3005,7 +3011,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, struct ixgbe_q_vector *q_vector = ring->q_vector; if (q_vector) - netif_set_xps_queue(adapter->netdev, + netif_set_xps_queue(ring->netdev, &q_vector->affinity_mask, ring->queue_index); } @@ -3395,7 +3401,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; - int p; + u16 pool; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | @@ -3412,9 +3418,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) else if (rss_i > 1) psrtype |= 1 << 29; - for (p = 0; p < adapter->num_rx_pools; p++) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), - psrtype); + for_each_set_bit(pool, &adapter->fwd_bitmask, 32) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) @@ -3683,7 +3688,11 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_82599EB: case ixgbe_mac_X540: for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i]->reg_idx; + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); @@ -3713,7 +3722,11 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_82599EB: case ixgbe_mac_X540: for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i]->reg_idx; + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl |= IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); @@ -3750,7 +3763,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) unsigned int rar_entries = hw->mac.num_rar_entries - 1; int count = 0; - /* In SR-IOV mode significantly less RAR entries are available */ + /* In SR-IOV/VMDQ modes significantly less RAR entries are available */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) rar_entries = IXGBE_MAX_PF_MACVLANS - 1; @@ -3825,14 +3838,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev) if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; - } else { - /* - * Write addresses to the MTA, if the attempt fails - * then we should just turn on promiscuous mode so - * that we can at least receive multicast traffic - */ - hw->mac.ops.update_mc_addr_list(hw, netdev); - vmolr |= IXGBE_VMOLR_ROMPE; } ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = false; @@ -3849,6 +3854,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev) vmolr |= IXGBE_VMOLR_ROPE; } + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on 
promiscuous mode so + * that we can at least receive multicast traffic + */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + vmolr |= IXGBE_VMOLR_ROMPE; + if (adapter->num_vfs) ixgbe_restore_vf_multicasts(adapter); @@ -3893,15 +3905,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - local_bh_disable(); /* for ixgbe_qv_lock_napi() */ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { napi_disable(&adapter->q_vector[q_idx]->napi); - while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) { + while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { pr_info("QV %d locked\n", q_idx); - mdelay(1); + usleep_range(1000, 20000); } } - local_bh_enable(); } #ifdef CONFIG_IXGBE_DCB @@ -4118,6 +4128,228 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) spin_unlock(&adapter->fdir_perfect_lock); } +static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. */ + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); + + /* clear the affected bit */ + vmolr &= ~IXGBE_VMOLR_MPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= IXGBE_VMOLR_MPE; + } else { + vmolr |= IXGBE_VMOLR_ROMPE; + hw->mac.ops.update_mc_addr_list(hw, dev); + } + ixgbe_write_uc_addr_list(adapter->netdev); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); +} + +static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + u8 *addr, u16 pool) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int entry; + + entry = hw->mac.num_rar_entries - pool; + hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV); +} + +static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int rss_i = adapter->num_rx_queues_per_pool; + struct ixgbe_hw *hw = &adapter->hw; + u16 pool = vadapter->pool; + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (IXGBE_CB(skb)->page_released) { + dma_unmap_page(dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } + dev_kfree_skb(skb); + } + rx_buffer->skb = NULL; + if (rx_buffer->dma) + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_pages(rx_buffer->page, + ixgbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + 
memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, + struct ixgbe_ring *rx_ring) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int index = rx_ring->queue_index + vadapter->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + ixgbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); + ixgbe_clean_rx_ring(rx_ring); + rx_ring->l2_accel_priv = NULL; +} + +static int ixgbe_fwd_ring_down(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + + return 0; +} + +static int ixgbe_fwd_ring_up(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->pool, &adapter->fwd_bitmask)) + return 0; + + baseq = accel->pool * adapter->num_rx_queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->pool, adapter->num_rx_pools, + baseq, baseq + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + + accel->netdev = vdev; + accel->rx_base_queue = rxbase = baseq; + accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = vdev; + adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->l2_accel_priv = accel; + } + + queues = min_t(unsigned int, + adapter->num_rx_queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); + + ixgbe_fwd_psrtype(accel); + ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + return err; +fwd_queue_err: + ixgbe_fwd_ring_down(vdev, accel); + return err; +} + +static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (dfwd->fwd_priv) { + err = ixgbe_fwd_ring_up(upper, vadapter); + if (err) + continue; + } + } + } +} + static void ixgbe_configure(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -4169,6 +4401,7 @@ 
static void ixgbe_configure(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); + ixgbe_configure_dfwd(adapter); } static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) @@ -4322,6 +4555,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) static void ixgbe_up_complete(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; int err; u32 ctrl_ext; @@ -4365,6 +4600,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); + /* enable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_start_all_queues(upper); + } + } + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4456,59 +4701,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } /** - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue - * @rx_ring: ring to free buffers from - **/ -static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_buffer_info[i]; - if (rx_buffer->skb) { - struct sk_buff *skb = rx_buffer->skb; - if (IXGBE_CB(skb)->page_released) { - dma_unmap_page(dev, - IXGBE_CB(skb)->dma, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } - dev_kfree_skb(skb); - } - rx_buffer->skb = NULL; - if (rx_buffer->dma) - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if (rx_buffer->page) - __free_pages(rx_buffer->page, - ixgbe_rx_pg_order(rx_ring)); - rx_buffer->page = NULL; - } - - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -/** * ixgbe_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ @@ -4585,6 +4777,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; u32 rxctrl; int i; @@ -4608,6 +4802,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter) netif_carrier_off(netdev); netif_tx_disable(netdev); + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } + ixgbe_irq_disable(adapter); ixgbe_napi_disable_all(adapter); @@ -4816,11 +5023,20 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); #ifdef CONFIG_PCI_IOV + if (max_vfs > 0) + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); 
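[Editor's note] ixgbe_fwd_ring_up()/ixgbe_configure_dfwd() above, together with the ixgbe_fwd_add/ixgbe_fwd_del/ixgbe_fwd_xmit handlers wired into ixgbe_netdev_ops later in this patch, implement the new L2 forwarding offload ("dfwd") contract: the PF driver hands the macvlan upper device an opaque cookie when a station is added, and macvlan passes that cookie back on every transmit so the lower driver can steer the skb onto the pool's dedicated rings. A skeletal lower-driver sketch of the three ndo hooks; the toy_* names and struct are hypothetical, only the ndo signatures match the ones used here:

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/slab.h>

	struct toy_fwd_priv {                   /* hypothetical per-station cookie */
		struct net_device *upper;       /* the macvlan netdev */
		unsigned int tx_base_queue;     /* first queue of the reserved pool */
	};

	static void *toy_dfwd_add_station(struct net_device *pdev,
					  struct net_device *vdev)
	{
		struct toy_fwd_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

		if (!priv)
			return ERR_PTR(-ENOMEM);
		priv->upper = vdev;
		/* here: reserve a queue pool and program a MAC filter for vdev */
		return priv;                    /* macvlan stores this as fwd_priv */
	}

	static void toy_dfwd_del_station(struct net_device *pdev, void *fwd_priv)
	{
		/* here: quiesce the pool's rings and drop the MAC filter */
		kfree(fwd_priv);
	}

	static netdev_tx_t toy_dfwd_start_xmit(struct sk_buff *skb,
					       struct net_device *dev,
					       void *fwd_priv)
	{
		/* ixgbe maps queue_mapping + tx_base_queue to a pool ring and
		 * calls its normal ring transmit; the toy just drops the skb.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops toy_netdev_ops = {
		.ndo_dfwd_add_station = toy_dfwd_add_station,
		.ndo_dfwd_del_station = toy_dfwd_del_station,
		.ndo_dfwd_start_xmit  = toy_dfwd_start_xmit,
	};

The offload is gated by the NETIF_F_HW_L2FW_DOFFLOAD bit this patch adds to hw_features; with it advertised, macvlan requests a station from its open path, so creating an ordinary macvlan on the PF picks up the acceleration without extra configuration.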
+ /* assign number of SR-IOV VFs */ - if (hw->mac.type != ixgbe_mac_82598EB) - adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs; + if (hw->mac.type != ixgbe_mac_82598EB) { + if (max_vfs > 63) { + adapter->num_vfs = 0; + e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); + } else { + adapter->num_vfs = max_vfs; + } + } +#endif /* CONFIG_PCI_IOV */ -#endif /* enable itr by default in dynamic mode */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; @@ -4838,6 +5054,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) return -EIO; } + /* PF holds first pool slot */ + set_bit(0, &adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); return 0; @@ -4867,6 +5085,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (!tx_ring->tx_buffer_info) goto err; + u64_stats_init(&tx_ring->syncp); + /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); @@ -4949,6 +5169,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) if (!rx_ring->rx_buffer_info) goto err; + u64_stats_init(&rx_ring->syncp); + /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); @@ -5143,7 +5365,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) static int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int err; + int err, queues; /* disallow open during test */ if (test_bit(__IXGBE_TESTING, &adapter->state)) @@ -5168,16 +5390,21 @@ static int ixgbe_open(struct net_device *netdev) goto err_req_irq; /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(netdev, - adapter->num_rx_pools > 1 ? 1 : - adapter->num_tx_queues); + if (adapter->num_rx_pools > 1) + queues = adapter->num_rx_queues_per_pool; + else + queues = adapter->num_tx_queues; + + err = netif_set_real_num_tx_queues(netdev, queues); if (err) goto err_set_queues; - - err = netif_set_real_num_rx_queues(netdev, - adapter->num_rx_pools > 1 ? 1 : - adapter->num_rx_queues); + if (adapter->num_rx_pools > 1 && + adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) + queues = IXGBE_MAX_L2A_QUEUES; + else + queues = adapter->num_rx_queues; + err = netif_set_real_num_rx_queues(netdev, queues); if (err) goto err_set_queues; @@ -6767,8 +6994,9 @@ out_drop: return NETDEV_TX_OK; } -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) +static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, + struct ixgbe_ring *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; @@ -6784,10 +7012,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, skb_set_tail_pointer(skb, 17); } - tx_ring = adapter->tx_ring[skb->queue_mapping]; + tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + return __ixgbe_xmit_frame(skb, netdev, NULL); +} + /** * ixgbe_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -7044,6 +7279,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + bool pools; /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || @@ -7051,6 +7287,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) tc < MAX_TRAFFIC_CLASS)) return -EINVAL; + pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) + return -EBUSY; + /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. @@ -7305,6 +7545,104 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); } +static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) +{ + struct ixgbe_fwd_adapter *fwd_adapter = NULL; + struct ixgbe_adapter *adapter = netdev_priv(pdev); + unsigned int limit; + int pool, err; + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || + vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 1,2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } + + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || + (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) + return ERR_PTR(-EBUSY); + + fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); + if (!fwd_adapter) + return ERR_PTR(-ENOMEM); + + pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); + adapter->num_rx_pools++; + set_bit(pool, &adapter->fwd_bitmask); + limit = find_last_bit(&adapter->fwd_bitmask, 32); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; + + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + if (err) + goto fwd_add_err; + fwd_adapter->pool = pool; + fwd_adapter->real_adapter = adapter; + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (err) + goto fwd_add_err; + netif_tx_start_all_queues(vdev); + return fwd_adapter; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + clear_bit(pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + kfree(fwd_adapter); + return ERR_PTR(err); +} + +static void ixgbe_fwd_del(struct net_device *pdev, void *priv) +{ + struct ixgbe_fwd_adapter *fwd_adapter = priv; + struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; + unsigned int limit; + + clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + + limit = 
find_last_bit(&adapter->fwd_bitmask, 32); + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); + ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + fwd_adapter->pool, adapter->num_rx_pools, + fwd_adapter->rx_base_queue, + fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + kfree(fwd_adapter); +} + +static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, + struct net_device *dev, + void *priv) +{ + struct ixgbe_fwd_adapter *fwd_adapter = priv; + unsigned int queue; + struct ixgbe_ring *tx_ring; + + queue = skb->queue_mapping + fwd_adapter->tx_base_queue; + tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; + + return __ixgbe_xmit_frame(skb, dev, tx_ring); +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -7349,6 +7687,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fdb_add = ixgbe_ndo_fdb_add, .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, + .ndo_dfwd_add_station = ixgbe_fwd_add, + .ndo_dfwd_del_station = ixgbe_fwd_del, + .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, }; /** @@ -7362,19 +7703,16 @@ static const struct net_device_ops ixgbe_netdev_ops = { **/ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; struct list_head *entry; int physfns = 0; - /* Some cards can not use the generic count PCIe functions method, and - * so must be hardcoded to the correct value. + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these with + * the correct number of functions. */ - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_SFP_SF_QP: - case IXGBE_DEV_ID_82599_QSFP_SF_QP: + if (ixgbe_pcie_from_parent(&adapter->hw)) { physfns = 4; - break; - default: + } else { list_for_each(entry, &adapter->pdev->bus_list) { struct pci_dev *pdev = list_entry(entry, struct pci_dev, bus_list); @@ -7490,19 +7828,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } pci_using_dac = 0; } @@ -7655,7 +7988,7 @@ skip_sriov: NETIF_F_RXHASH | NETIF_F_RXCSUM; - netdev->hw_features = netdev->features; + netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -7759,29 +8092,6 @@ skip_sriov: if (ixgbe_pcie_from_parent(hw)) ixgbe_get_parent_bus_info(adapter); - /* print bus type/speed/width info */ - e_dev_info("(PCI Express:%s:%s) %pM\n", - (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" : - hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : - hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : - "Unknown"), - (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : - hw->bus.width == ixgbe_bus_width_pcie_x4 ? 
"Width x4" : - hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : - "Unknown"), - netdev->dev_addr); - - err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); - if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); - else - e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", - hw->mac.type, hw->phy.type, part_str); - /* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough * bandwidth due to being older generation PCIe parts. We clamp these @@ -7797,6 +8107,19 @@ skip_sriov: } ixgbe_check_minimum_link(adapter, expected_gts); + err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%pM\n", netdev->dev_addr); + /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); if (err == IXGBE_ERR_EEPROM_VERSION) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e4c676006be..39217e5ff7d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** * ixgbe_identify_phy_generic - Get physical layer module @@ -1164,7 +1165,7 @@ err_read_i2c_eeprom: * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; s32 status = IXGBE_ERR_PHY_ADDR_INVALID; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 24af12e3719..fffcbdd2bf0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -57,28 +57,28 @@ #define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 /* Bitmasks */ -#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 -#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 -#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 -#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 -#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 -#define IXGBE_SFF_1GBASET_CAPABLE 0x8 -#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 -#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 -#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 -#define IXGBE_SFF_ADDRESSING_MODE 0x4 -#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 -#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define 
IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 -#define IXGBE_I2C_EEPROM_READ_MASK 0x100 -#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 -#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 -#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 -#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 -#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 @@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 276d7b13533..d6f0c0d8cf1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -129,10 +129,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) if (!pre_existing_vfs && !adapter->num_vfs) return; - if (!pre_existing_vfs) - dev_warn(&adapter->pdev->dev, - "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); - /* If there are pre-existing VFs then we have to force * use of that many - over ride any module parameter value. 
* This may result from the user unloading the PF driver @@ -223,17 +219,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) IXGBE_WRITE_FLUSH(hw); /* Disable VMDq flag so device will be set in VM mode */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - adapter->ring_feature[RING_F_VMDQ].offset = 0; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + } else { + rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); + } - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_VMDQ].offset = 0; adapter->ring_feature[RING_F_RSS].limit = rss; /* take a breather then clean up driver data */ msleep(100); - - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; return 0; } @@ -298,13 +296,10 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ - if (!err && current_flags != adapter->flags) { - /* ixgbe_disable_sriov() doesn't clear VMDQ flag */ - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; #ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) ixgbe_sriov_reinit(adapter); #endif - } return err; } @@ -558,7 +553,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int rar_entry = hw->mac.num_rar_entries - (vf + 1); - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV); return 0; @@ -621,16 +616,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { - unsigned char vf_mac_addr[6]; struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); bool enable = ((event_mask & 0x10000000U) != 0); - if (enable) { - eth_zero_addr(vf_mac_addr); - memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); - } + if (enable) + eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 10775cb9b6d..7c19e969576 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RTTDQSEL 0x04904 #define IXGBE_RTTDT1C 0x04908 #define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTQCNCR 0x08B00 +#define IXGBE_RTTQCNTG 0x04A90 +#define IXGBE_RTTBCNRD 0x0498C +#define IXGBE_RTTQCNRR 0x0498C #define IXGBE_RTTDTECC 0x04990 #define IXGBE_RTTDTECC_NO_BCN 0x00000100 #define IXGBE_RTTBCNRC 0x04984 @@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RTTBCNRC_RF_INT_MASK \ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) #define IXGBE_RTTBCNRM 0x04980 +#define IXGBE_RTTQCNRM 0x04980 /* FCoE DMA Context Registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. 
PTR Low */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 389324f5929..24b80a6cfca 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -32,12 +32,12 @@ #include "ixgbe.h" #include "ixgbe_phy.h" -#define IXGBE_X540_MAX_TX_QUEUES 128 -#define IXGBE_X540_MAX_RX_QUEUES 128 -#define IXGBE_X540_RAR_ENTRIES 128 -#define IXGBE_X540_MC_TBL_SIZE 128 -#define IXGBE_X540_VFT_TBL_SIZE 128 -#define IXGBE_X540_RX_PB_SIZE 384 +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index c9d0c12d6f0..54d9acef9c4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -45,16 +45,27 @@ struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; - int base_stat_offset; - int saved_reset_offset; + struct { + int sizeof_stat; + int stat_offset; + int base_stat_offset; + int saved_reset_offset; + }; }; -#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \ - offsetof(struct ixgbevf_adapter, m), \ - offsetof(struct ixgbevf_adapter, b), \ - offsetof(struct ixgbevf_adapter, r) +#define IXGBEVF_STAT(m, b, r) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \ + .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \ +} + +#define IXGBEVF_ZSTAT(m) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = -1, \ + .saved_reset_offset = -1 \ +} static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, @@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { stats.saved_reset_vfgorc)}, {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, stats.saved_reset_vfgotc)}, - {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)}, + {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, stats.saved_reset_vfmprc)}, - {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base, - zero_base)}, - {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base, - zero_base)}, - {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, - zero_base)}, + {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)}, + {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, + {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)}, +#ifdef BP_EXTENDED_STATS + {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, + {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, + {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)}, + {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)}, + {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)}, + {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)}, +#endif }; #define IXGBE_QUEUE_STATS_LEN 0 @@ -140,58 +156,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) -static char 
*ixgbevf_reg_names[] = { - "IXGBE_VFCTRL", - "IXGBE_VFSTATUS", - "IXGBE_VFLINKS", - "IXGBE_VFRXMEMWRAP", - "IXGBE_VFFRTIMER", - "IXGBE_VTEICR", - "IXGBE_VTEICS", - "IXGBE_VTEIMS", - "IXGBE_VTEIMC", - "IXGBE_VTEIAC", - "IXGBE_VTEIAM", - "IXGBE_VTEITR", - "IXGBE_VTIVAR", - "IXGBE_VTIVAR_MISC", - "IXGBE_VFRDBAL0", - "IXGBE_VFRDBAL1", - "IXGBE_VFRDBAH0", - "IXGBE_VFRDBAH1", - "IXGBE_VFRDLEN0", - "IXGBE_VFRDLEN1", - "IXGBE_VFRDH0", - "IXGBE_VFRDH1", - "IXGBE_VFRDT0", - "IXGBE_VFRDT1", - "IXGBE_VFRXDCTL0", - "IXGBE_VFRXDCTL1", - "IXGBE_VFSRRCTL0", - "IXGBE_VFSRRCTL1", - "IXGBE_VFPSRTYPE", - "IXGBE_VFTDBAL0", - "IXGBE_VFTDBAL1", - "IXGBE_VFTDBAH0", - "IXGBE_VFTDBAH1", - "IXGBE_VFTDLEN0", - "IXGBE_VFTDLEN1", - "IXGBE_VFTDH0", - "IXGBE_VFTDH1", - "IXGBE_VFTDT0", - "IXGBE_VFTDT1", - "IXGBE_VFTXDCTL0", - "IXGBE_VFTXDCTL1", - "IXGBE_VFTDWBAL0", - "IXGBE_VFTDWBAL1", - "IXGBE_VFTDWBAH0", - "IXGBE_VFTDWBAH1" -}; - - static int ixgbevf_get_regs_len(struct net_device *netdev) { - return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32); +#define IXGBE_REGS_LEN 45 + return IXGBE_REGS_LEN * sizeof(u32); } static void ixgbevf_get_regs(struct net_device *netdev, @@ -264,9 +232,6 @@ static void ixgbevf_get_regs(struct net_device *netdev, regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); for (i = 0; i < 2; i++) regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); - - for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++) - hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]); } static void ixgbevf_get_drvinfo(struct net_device *netdev, @@ -441,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + char *base = (char *) adapter; int i; +#ifdef BP_EXTENDED_STATS + u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, + tx_yields = 0, tx_cleaned = 0, tx_missed = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_yields += adapter->rx_ring[i].bp_yields; + rx_cleaned += adapter->rx_ring[i].bp_cleaned; + rx_missed += adapter->rx_ring[i].bp_misses; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_yields += adapter->tx_ring[i].bp_yields; + tx_cleaned += adapter->tx_ring[i].bp_cleaned; + tx_missed += adapter->tx_ring[i].bp_misses; + } + + adapter->bp_rx_yields = rx_yields; + adapter->bp_rx_cleaned = rx_cleaned; + adapter->bp_rx_missed = rx_missed; + + adapter->bp_tx_yields = tx_yields; + adapter->bp_tx_cleaned = tx_cleaned; + adapter->bp_tx_missed = tx_missed; +#endif ixgbevf_update_stats(adapter); for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - char *p = (char *)adapter + - ixgbe_gstrings_stats[i].stat_offset; - char *b = (char *)adapter + - ixgbe_gstrings_stats[i].base_stat_offset; - char *r = (char *)adapter + - ixgbe_gstrings_stats[i].saved_reset_offset; - data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - - ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)b : *(u32 *)b) + - ((ixgbe_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ?
*(u64 *)r : *(u32 *)r); + char *p = base + ixgbe_gstrings_stats[i].stat_offset; + char *b = base + ixgbe_gstrings_stats[i].base_stat_offset; + char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset; + + if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) { + if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) + data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r; + else + data[i] = *(u64 *)p; + } else { + if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) + data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r; + else + data[i] = *(u32 *)p; + } } } @@ -685,6 +678,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev) return 0; } +static int ixgbevf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ixgbevf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_q_vector *q_vector; + int num_vectors, i; + u16 tx_itr_param, rx_itr_param; + + /* don't accept tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count + && ec->tx_coalesce_usecs) + return -EINVAL; + + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_10K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbevf_write_eitr(q_vector); + } + + return 0; +} + static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_settings = ixgbevf_get_settings, .get_drvinfo = ixgbevf_get_drvinfo, @@ -700,6 +772,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_sset_count = ixgbevf_get_sset_count, .get_strings = ixgbevf_get_strings, .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index fff0d986752..8971e2d0a98 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -38,6 +38,11 @@ #include 
"vf.h" +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#define BP_EXTENDED_STATS +#endif + /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct ixgbevf_tx_buffer { @@ -76,6 +81,11 @@ struct ixgbevf_ring { struct u64_stats_sync syncp; u64 hw_csum_rx_error; u64 hw_csum_rx_good; +#ifdef BP_EXTENDED_STATS + u64 bp_yields; + u64 bp_misses; + u64 bp_cleaned; +#endif u16 head; u16 tail; @@ -145,7 +155,118 @@ struct ixgbevf_q_vector { struct napi_struct napi; struct ixgbevf_ring_container rx, tx; char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define IXGBEVF_QV_STATE_IDLE 0 +#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ }; +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) +{ + + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBEVF_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); + q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->tx.ring->bp_yields++; +#endif + } else { + /* we don't care if someone yielded */ + q_vector->state = IXGBEVF_QV_STATE_NAPI; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | + IXGBEVF_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* called from ixgbevf_low_latency_poll() */ +static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBEVF_QV_LOCKED)) { + q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->rx.ring->bp_yields++; +#endif + } else { + /* preserve yield marks */ + q_vector->state |= IXGBEVF_QV_STATE_POLL; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= 
IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); + return q_vector->state & IXGBEVF_QV_USER_PEND; +} + +/* false if QV is currently owned */ +static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_OWNED) + rc = false; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ /* * microsecond values for various ITR rates shifted by 2 to fit itr register @@ -165,9 +286,13 @@ struct ixgbevf_q_vector { ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG -#define IXGBE_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) +static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} #define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) @@ -240,7 +365,6 @@ struct ixgbevf_adapter { struct ixgbe_hw hw; u16 msg_enable; struct ixgbevf_hw_stats stats; - u64 zero_base; /* Interrupt Throttle Rate */ u32 eitr_param; @@ -249,6 +373,16 @@ struct ixgbevf_adapter { unsigned int tx_ring_count; unsigned int rx_ring_count; +#ifdef BP_EXTENDED_STATS + u64 bp_rx_yields; + u64 bp_rx_cleaned; + u64 bp_rx_missed; + + u64 bp_tx_yields; + u64 bp_tx_cleaned; + u64 bp_tx_missed; +#endif + u32 link_speed; bool link_up; @@ -281,27 +415,25 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; -extern void ixgbevf_up(struct ixgbevf_adapter *adapter); -extern void ixgbevf_down(struct ixgbevf_adapter *adapter); -extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); -extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); -extern void ixgbevf_set_ethtool_ops(struct net_device *netdev); -extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, - struct ixgbevf_ring *); -extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); -extern int ethtool_ioctl(struct ifreq *ifr); - -extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); -extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); +void ixgbevf_up(struct ixgbevf_adapter *adapter); +void ixgbevf_down(struct ixgbevf_adapter *adapter); +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); +void ixgbevf_reset(struct ixgbevf_adapter *adapter); +void ixgbevf_set_ethtool_ops(struct net_device *netdev); +int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); +int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); +void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); +void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); +void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); +int ethtool_ioctl(struct ifreq 
*ifr); + +extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); + +void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); +void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); #ifdef DEBUG -extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); +char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); #define hw_dbg(hw, format, arg...) \ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) #else diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 59a62bbfb37..92ef4cb5a8e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.7.12-k" +#define DRV_VERSION "2.11.3-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; @@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, } /** + * ixgbevf_rx_skb - Helper function to determine proper Rx method + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + * @status: hardware indication of status of receive + * @rx_desc: rx descriptor + **/ +static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, + struct sk_buff *skb, u8 status, + union ixgbe_adv_rx_desc *rx_desc) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ixgbevf_qv_busy_polling(q_vector)) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ + + ixgbevf_receive_skb(q_vector, skb, status, rx_desc); +} + +/** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: pointer to Rx descriptor ring structure * @status_err: hardware indication of status of receive @@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } -static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *rx_ring, - int budget) +static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *rx_ring, + int budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct pci_dev *pdev = adapter->pdev; @@ -473,15 +497,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, total_rx_bytes += skb->len; total_rx_packets++; - /* - * Work around issue of some types of VM to VM loop back - * packets not getting split correctly - */ - if (staterr & IXGBE_RXD_STAT_LB) { - u32 header_fixup_len = skb_headlen(skb); - if (header_fixup_len < 14) - skb_push(skb, header_fixup_len); - } skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast @@ -494,7 +509,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, goto next_desc; } - ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); + 
ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; @@ -514,7 +529,7 @@ next_desc: } rx_ring->next_to_clean = i; - cleaned_count = IXGBE_DESC_UNUSED(rx_ring); + cleaned_count = ixgbevf_desc_unused(rx_ring); if (cleaned_count) ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); @@ -526,7 +541,7 @@ next_desc: q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - return !!budget; + return total_rx_packets; } /** @@ -549,6 +564,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); +#ifdef CONFIG_NET_RX_BUSY_POLL + if (!ixgbevf_qv_lock_napi(q_vector)) + return budget; +#endif + /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) @@ -558,10 +578,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) - clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, - per_ring_budget); + clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget) + < per_ring_budget); adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_unlock_napi(q_vector); +#endif + /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -580,7 +605,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information */ -static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; @@ -596,6 +621,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbevf_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int found = 0; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbevf_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbevf_for_each_ring(ring, q_vector->rx) { + found = ixgbevf_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->bp_cleaned += found; + else + ring->bp_misses++; +#endif + if (found) + break; + } + + ixgbevf_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -756,37 +815,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; - struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; - u32 msg; - bool got_ack = false; hw->mac.get_link_status = 1; - if (!hw->mbx.ops.check_for_ack(hw)) - got_ack = true; - - if (!hw->mbx.ops.check_for_msg(hw)) { - hw->mbx.ops.read(hw, &msg, 1); - - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) { - mod_timer(&adapter->watchdog_timer, - 
round_jiffies(jiffies + 1)); - adapter->link_up = false; - } - if (msg & IXGBE_VT_MSGTYPE_NACK) - dev_info(&pdev->dev, - "Last Request of type %2.2x to PF Nacked\n", - msg & 0xFF); - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; - } - - /* checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it - */ - if (got_ack) - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); @@ -1107,6 +1141,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } +static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* PSRTYPE must be initialized in 82599 */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; + + if (adapter->num_rx_queues > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); +} + static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1154,8 +1203,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) int i, j; u32 rdlen; - /* PSRTYPE must be initialized in 82599 */ - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + ixgbevf_setup_psrtype(adapter); /* set_rx_buffer_len must be called before ring initialization */ ixgbevf_set_rx_buffer_len(adapter); @@ -1293,6 +1341,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); +#endif napi_enable(&q_vector->napi); } } @@ -1306,6 +1357,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; napi_disable(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ } } @@ -1323,31 +1380,55 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *ring = &adapter->rx_ring[i]; ixgbevf_alloc_rx_buffers(adapter, ring, - IXGBE_DESC_UNUSED(ring)); + ixgbevf_desc_unused(ring)); } } -#define IXGBE_MAX_RX_DESC_POLL 10 -static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, - int rxr) +#define IXGBEVF_MAX_RX_DESC_POLL 10 +static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, + int rxr) { struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; int j = adapter->rx_ring[rxr].reg_idx; - int k; - for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { - if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) - break; - else - msleep(1); - } - if (k >= IXGBE_MAX_RX_DESC_POLL) { - hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " - "not set within the polling period\n", rxr); - } + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); - ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr], - adapter->rx_ring[rxr].count - 1); + if (!wait_loop) + hw_dbg(hw, 
"RXDCTL.ENABLE queue %d not set while polling\n", + rxr); + + ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], + (adapter->rx_ring[rxr].count - 1)); +} + +static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); } static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) @@ -1545,8 +1626,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - ixgbevf_negotiate_api(adapter); - ixgbevf_reset_queues(adapter); ixgbevf_configure(adapter); @@ -1679,7 +1758,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) /* signal that we are down to the interrupt handler */ set_bit(__IXGBEVF_DOWN, &adapter->state); - /* disable receives */ + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]); netif_tx_disable(netdev); @@ -1733,10 +1815,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - if (hw->mac.ops.reset_hw(hw)) + if (hw->mac.ops.reset_hw(hw)) { hw_dbg(hw, "PF still resetting\n"); - else + } else { hw->mac.ops.init_hw(hw); + ixgbevf_negotiate_api(adapter); + } if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, @@ -1929,6 +2013,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif adapter->q_vector[q_idx] = q_vector; } @@ -1938,6 +2025,9 @@ err_out: while (q_idx) { q_idx--; q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); kfree(q_vector); adapter->q_vector[q_idx] = NULL; @@ -1961,6 +2051,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; adapter->q_vector[q_idx] = NULL; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif netif_napi_del(&q_vector->napi); kfree(q_vector); } @@ -2072,6 +2165,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, @@ -2082,6 +2178,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) pr_err("init_shared_code failed: %d\n", err); goto out; } + ixgbevf_negotiate_api(adapter); err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); if (err) dev_info(&pdev->dev, "Error reading MAC address\n"); @@ -2097,9 +2194,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) memcpy(hw->mac.addr, netdev->dev_addr, 
netdev->addr_len); } - /* lock to protect mailbox accesses */ - spin_lock_init(&adapter->mbx_lock); - /* Enable dynamic interrupt throttling rates */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; @@ -2620,8 +2714,6 @@ static int ixgbevf_open(struct net_device *netdev) } } - ixgbevf_negotiate_api(adapter); - /* setup queue reg_idx and Rx queue count */ err = ixgbevf_setup_queues(adapter); if (err) @@ -3010,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) /* We need to check again in a case another CPU has just * made room available. */ - if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) + if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ @@ -3021,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { - if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) + if (likely(ixgbevf_desc_unused(tx_ring) >= size)) return 0; return __ixgbevf_maybe_stop_tx(tx_ring, size); } @@ -3216,6 +3308,8 @@ static int ixgbevf_resume(struct pci_dev *pdev) } pci_set_master(pdev); + ixgbevf_reset(adapter); + rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); rtnl_unlock(); @@ -3224,8 +3318,6 @@ static int ixgbevf_resume(struct pci_dev *pdev) return err; } - ixgbevf_reset(adapter); - if (netif_running(netdev)) { err = ixgbevf_open(netdev); if (err) @@ -3293,6 +3385,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbevf_busy_poll_recv, +#endif }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -3326,19 +3421,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && - !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; } pci_using_dac = 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 387b52635bc..4d44d64ae38 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; if (addr) - memcpy(msg_addr, addr, 6); + memcpy(msg_addr, addr, ETH_ALEN); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, 6); + memcpy(msg_addr, addr, ETH_ALEN); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 23de82a9da8..f5685c0d057 100644 --- a/drivers/net/ethernet/jme.c +++ 
b/drivers/net/ethernet/jme.c @@ -309,7 +309,7 @@ static void jme_load_macaddr(struct net_device *netdev) { struct jme_adapter *jme = netdev_priv(netdev); - unsigned char macaddr[6]; + unsigned char macaddr[ETH_ALEN]; u32 val; spin_lock_bh(&jme->macaddr_lock); @@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev) val = jread32(jme, JME_RXUMA_HI); macaddr[4] = (val >> 0) & 0xFF; macaddr[5] = (val >> 8) & 0xFF; - memcpy(netdev->dev_addr, macaddr, 6); + memcpy(netdev->dev_addr, macaddr, ETH_ALEN); spin_unlock_bh(&jme->macaddr_lock); } @@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev, err_out_unmap: iounmap(jme->regs); err_out_free_netdev: - pci_set_drvdata(pdev, NULL); free_netdev(netdev); err_out_release_regions: pci_release_regions(pdev); @@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev) unregister_netdev(netdev); iounmap(jme->regs); - pci_set_drvdata(pdev, NULL); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 3efc897c991..58cd67c0c8e 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -28,7 +28,6 @@ #define DRV_NAME "jme" #define DRV_VERSION "1.0.8" -#define PFX DRV_NAME ": " #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 #define PCI_DEVICE_ID_JMICRON_JMC260 0x0260 diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index a36fa80968e..4a5e3b0f712 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev) lp = netdev_priv(dev); bif->dev = dev; - memcpy(dev->dev_addr, bif->mac, 6); + memcpy(dev->dev_addr, bif->mac, ETH_ALEN); lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx"); lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 7fb5677451f..61088a6a942 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); spin_unlock_bh(&mp->mib_counters_lock); - - mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } static void mib_counters_timer_wrapper(unsigned long _mp) { struct mv643xx_eth_private *mp = (void *)_mp; - mib_counters_update(mp); + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } @@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev) mp->int_mask |= INT_TX_END_0 << i; } + add_timer(&mp->mib_counters_timer); port_start(mp); wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); @@ -2514,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, mac_addr = of_get_mac_address(pnp); if (mac_addr) - memcpy(ppd.mac_addr, mac_addr, 6); + memcpy(ppd.mac_addr, mac_addr, ETH_ALEN); mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); @@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, if (!ppdev) return -ENOMEM; ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + ppdev->dev.of_node = pnp; ret = platform_device_add_resources(ppdev, &res, 1); if (ret) @@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp, struct net_device *dev = mp->dev; if (is_valid_ether_addr(pd->mac_addr)) - memcpy(dev->dev_addr, 
pd->mac_addr, 6); + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); else uc_addr_get(mp, dev->dev_addr); @@ -2890,6 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) PHY_INTERFACE_MODE_GMII); if (!mp->phy) err = -ENODEV; + else + phy_addr_set(mp, mp->phy->addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); @@ -2916,7 +2918,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mp->mib_counters_timer.data = (unsigned long)mp; mp->mib_counters_timer.function = mib_counters_timer_wrapper; mp->mib_counters_timer.expires = jiffies + 30 * HZ; - add_timer(&mp->mib_counters_timer); spin_lock_init(&mp->mib_counters_lock); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index e2f66266031..7354960b583 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -4,11 +4,9 @@ * Since the MDIO interface of Marvell network interfaces is shared * between all network interfaces, having a single driver allows to * handle concurrent accesses properly (you may have four Ethernet - * ports, but they in fact share the same SMI interface to access the - * MDIO bus). Moreover, this MDIO interface code is similar between - * the mv643xx_eth driver and the mvneta driver. For now, it is only - * used by the mvneta driver, but it could later be used by the - * mv643xx_eth driver as well. + * ports, but they in fact share the same SMI interface to access + * the MDIO bus). This driver is currently used by the mvneta and + * mv643xx_eth drivers. * * Copyright (C) 2012 Marvell * @@ -44,6 +42,15 @@ #define MVMDIO_ERR_INT_SMI_DONE 0x00000010 #define MVMDIO_ERR_INT_MASK 0x0080 +/* + * SMI Timeout measurements: + * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt) + * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled) + */ +#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */ +#define MVMDIO_SMI_POLL_INTERVAL_MIN 45 +#define MVMDIO_SMI_POLL_INTERVAL_MAX 55 + struct orion_mdio_dev { struct mutex lock; void __iomem *regs; @@ -68,77 +75,68 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) static int orion_mdio_wait_ready(struct mii_bus *bus) { struct orion_mdio_dev *dev = bus->priv; - int count; - - if (dev->err_interrupt <= 0) { - count = 0; - while (1) { - if (orion_mdio_smi_is_done(dev)) - break; - - if (count > 100) { - dev_err(bus->parent, - "Timeout: SMI busy for too long\n"); - return -ETIMEDOUT; - } - - udelay(10); - count++; - } - } else { - if (!orion_mdio_smi_is_done(dev)) { + unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT); + unsigned long end = jiffies + timeout; + int timedout = 0; + + while (1) { + if (orion_mdio_smi_is_done(dev)) + return 0; + else if (timedout) + break; + + if (dev->err_interrupt <= 0) { + usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN, + MVMDIO_SMI_POLL_INTERVAL_MAX); + + if (time_is_before_jiffies(end)) + ++timedout; + } else { wait_event_timeout(dev->smi_busy_wait, - orion_mdio_smi_is_done(dev), - msecs_to_jiffies(100)); - if (!orion_mdio_smi_is_done(dev)) - return -ETIMEDOUT; - } + orion_mdio_smi_is_done(dev), + timeout); + + ++timedout; + } } - return 0; + dev_err(bus->parent, "Timeout: SMI busy for too long\n"); + return -ETIMEDOUT; } static int orion_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct orion_mdio_dev *dev = bus->priv; - int count; u32 val; int ret; mutex_lock(&dev->lock); ret = orion_mdio_wait_ready(bus); - if (ret < 0) { - mutex_unlock(&dev->lock); - return ret; 
- } + if (ret < 0) + goto out; writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | MVMDIO_SMI_READ_OPERATION), dev->regs); - /* Wait for the value to become available */ - count = 0; - while (1) { - val = readl(dev->regs); - if (val & MVMDIO_SMI_READ_VALID) - break; - - if (count > 100) { - dev_err(bus->parent, "Timeout when reading PHY\n"); - mutex_unlock(&dev->lock); - return -ETIMEDOUT; - } + ret = orion_mdio_wait_ready(bus); + if (ret < 0) + goto out; - udelay(10); - count++; + val = readl(dev->regs); + if (!(val & MVMDIO_SMI_READ_VALID)) { + dev_err(bus->parent, "SMI bus read not valid\n"); + ret = -ENODEV; + goto out; } + ret = val & 0xFFFF; +out: mutex_unlock(&dev->lock); - - return val & 0xFFFF; + return ret; } static int orion_mdio_write(struct mii_bus *bus, int mii_id, @@ -150,10 +148,8 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id, mutex_lock(&dev->lock); ret = orion_mdio_wait_ready(bus); - if (ret < 0) { - mutex_unlock(&dev->lock); - return ret; - } + if (ret < 0) + goto out; writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | @@ -161,9 +157,9 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id, (value << MVMDIO_SMI_DATA_SHIFT)), dev->regs); +out: mutex_unlock(&dev->lock); - - return 0; + return ret; } static int orion_mdio_reset(struct mii_bus *bus) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e35bac7cfdf..d5f0d72e5e3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, dev_kfree_skb_any(skb); dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); } if (rx_done) @@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); @@ -2792,6 +2792,9 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); + u64_stats_init(&pp->tx_stats.syncp); + u64_stats_init(&pp->rx_stats.syncp); + pp->weight = MVNETA_RX_POLL_WEIGHT; pp->phy_node = phy_node; pp->phy_interface = phy_mode; @@ -2811,7 +2814,7 @@ static int mvneta_probe(struct platform_device *pdev) } dt_mac_addr = of_get_mac_address(dn); - if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { + if (dt_mac_addr) { mac_from = "device tree"; memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); } else { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index ecc7f7b696b..59784619386 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4046,7 +4046,6 @@ err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return err; } @@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev) iounmap(hw->regs); kfree(hw); - pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index e09a8c6f853..43aa7acd84a 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4763,6 +4763,9 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, 
unsigned port, sky2->hw = hw; sky2->msg_enable = netif_msg_init(debug, default_msg); + u64_stats_init(&sky2->tx_stats.syncp); + u64_stats_init(&sky2->rx_stats.syncp); + /* Auto speed and flow control */ sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; if (hw->chip_id != CHIP_ID_YUKON_XL) @@ -5081,7 +5084,6 @@ err_out_free_regions: err_out_disable: pci_disable_device(pdev); err_out: - pci_set_drvdata(pdev, NULL); return err; } @@ -5124,8 +5126,6 @@ static void sky2_remove(struct pci_dev *pdev) iounmap(hw->regs); kfree(hw); - - pci_set_drvdata(pdev, NULL); } static int sky2_suspend(struct device *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index ea20182c696..1e9970d2f0f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1539,11 +1539,6 @@ out: return ret; } -static int calculate_transition(u16 oper_vlan, u16 admin_vlan) -{ - return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); -} - static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, int slave, int port) { @@ -1553,7 +1548,6 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, struct mlx4_dev *dev = &(priv->dev); int err; int admin_vlan_ix = NO_INDX; - enum mlx4_vlan_transition vlan_trans; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; @@ -1563,12 +1557,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, vp_oper->state.link_state == vp_admin->link_state) return 0; - vlan_trans = calculate_transition(vp_oper->state.default_vlan, - vp_admin->default_vlan); - if (!(priv->mfunc.master.slave_state[slave].active && - dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && - vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) { + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) { /* even if the UPDATE_QP command isn't supported, we still want * to set this VF link according to the admin directive */ @@ -1586,15 +1576,19 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, return -ENOMEM; if (vp_oper->state.default_vlan != vp_admin->default_vlan) { - err = __mlx4_register_vlan(&priv->dev, port, - vp_admin->default_vlan, - &admin_vlan_ix); - if (err) { - kfree(work); - mlx4_warn((&priv->dev), - "No vlan resources slave %d, port %d\n", - slave, port); - return err; + if (MLX4_VGT != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + kfree(work); + mlx4_warn((&priv->dev), + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + } else { + admin_vlan_ix = NO_INDX; } work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; mlx4_dbg((&(priv->dev)), @@ -1687,11 +1681,11 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (NO_INDX != vp_oper->vlan_idx) { __mlx4_unregister_vlan(&priv->dev, - port, vp_oper->vlan_idx); + port, vp_oper->state.default_vlan); vp_oper->vlan_idx = NO_INDX; } if (NO_INDX != vp_oper->mac_idx) { - __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx); + __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac); vp_oper->mac_idx = NO_INDX; } } @@ -1718,6 +1712,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (cmd == MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "Received reset from slave:%d\n", slave); slave_state[slave].active = false; 
+ slave_state[slave].old_vlan_api = false; mlx4_master_deactivate_admin_state(priv, slave); for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { slave_state[slave].event_eq[i].eqn = -1; @@ -2198,6 +2193,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) return ERR_PTR(-ENOMEM); } + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + return mailbox; } EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); @@ -2253,7 +2250,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_vport_oper_state *vf_oper; struct mlx4_vport_state *vf_admin; int slave; @@ -2269,7 +2265,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) return -EINVAL; vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; - vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if ((0 == vlan) && (0 == qos)) vf_admin->default_vlan = MLX4_VGT; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 004e4231af6..22fcbe78311 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -128,8 +128,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, return PTR_ERR(mailbox); cq_context = mailbox->buf; - memset(cq_context, 0, sizeof *cq_context); - cq_context->cq_max_count = cpu_to_be16(count); cq_context->cq_period = cpu_to_be16(period); @@ -153,8 +151,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, return PTR_ERR(mailbox); cq_context = mailbox->buf; - memset(cq_context, 0, sizeof *cq_context); - cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); cq_context->log_page_size = mtt->page_shift - 12; mtt_addr = mlx4_mtt_addr(dev, mtt); @@ -274,8 +270,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, } cq_context = mailbox->buf; - memset(cq_context, 0, sizeof *cq_context); - cq_context->flags = cpu_to_be32(!!collapsed << 18); if (timestamp_en) cq_context->flags |= cpu_to_be32(1 << 19); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 3e2d5047cdb..3a098cc4d34 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -44,12 +44,23 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) int mlx4_en_create_cq(struct mlx4_en_priv *priv, - struct mlx4_en_cq *cq, - int entries, int ring, enum cq_type mode) + struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, + int node) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; int err; + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); + if (!cq) { + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + en_err(priv, "Failed to allocate CQ structure\n"); + return -ENOMEM; + } + } + cq->size = entries; cq->buf_size = cq->size * mdev->dev->caps.cqe_size; @@ -57,17 +68,30 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->is_tx = mode; spin_lock_init(&cq->lock); + /* Allocate HW buffers on provided NUMA node. + * dev->numa_node is used in mtt range allocation flow. 
+ */ + set_dev_node(&mdev->dev->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, cq->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); if (err) - return err; + goto err_cq; err = mlx4_en_map_buffer(&cq->wqres.buf); if (err) - mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - else - cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; + goto err_res; + cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; + *pcq = cq; + + return 0; + +err_res: + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); +err_cq: + kfree(cq); + *pcq = NULL; return err; } @@ -117,12 +141,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, struct mlx4_en_cq *rx_cq; cq_idx = cq_idx % priv->rx_ring_num; - rx_cq = &priv->rx_cq[cq_idx]; + rx_cq = priv->rx_cq[cq_idx]; cq->vector = rx_cq->vector; } if (!cq->is_tx) - cq->size = priv->rx_ring[cq->ring].actual_size; + cq->size = priv->rx_ring[cq->ring]->actual_size; if ((cq->is_tx && priv->hwtstamp_config.tx_type) || (!cq->is_tx && priv->hwtstamp_config.rx_filter)) @@ -146,9 +170,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, return 0; } -void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq = *pcq; mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); @@ -157,6 +182,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; + kfree(cq); + *pcq = NULL; } void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 0c750985f47..0596f9f85a0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) int err = 0; for (i = 0; i < priv->tx_ring_num; i++) { - priv->tx_cq[i].moder_cnt = priv->tx_frames; - priv->tx_cq[i].moder_time = priv->tx_usecs; + priv->tx_cq[i]->moder_cnt = priv->tx_frames; + priv->tx_cq[i]->moder_time = priv->tx_usecs; if (priv->port_up) { - err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]); + err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]); if (err) return err; } @@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) return 0; for (i = 0; i < priv->rx_ring_num; i++) { - priv->rx_cq[i].moder_cnt = priv->rx_frames; - priv->rx_cq[i].moder_time = priv->rx_usecs; + priv->rx_cq[i]->moder_cnt = priv->rx_frames; + priv->rx_cq[i]->moder_time = priv->rx_usecs; priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; if (priv->port_up) { - err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); + err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]); if (err) return err; } @@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, } } for (i = 0; i < priv->tx_ring_num; i++) { - data[index++] = priv->tx_ring[i].packets; - data[index++] = priv->tx_ring[i].bytes; + data[index++] = priv->tx_ring[i]->packets; + data[index++] = priv->tx_ring[i]->bytes; } for (i = 0; i < priv->rx_ring_num; i++) { - data[index++] = priv->rx_ring[i].packets; - data[index++] = priv->rx_ring[i].bytes; + data[index++] = priv->rx_ring[i]->packets; + data[index++] = priv->rx_ring[i]->bytes; #ifdef 
CONFIG_NET_RX_BUSY_POLL - data[index++] = priv->rx_ring[i].yields; - data[index++] = priv->rx_ring[i].misses; - data[index++] = priv->rx_ring[i].cleaned; + data[index++] = priv->rx_ring[i]->yields; + data[index++] = priv->rx_ring[i]->misses; + data[index++] = priv->rx_ring[i]->cleaned; #endif } spin_unlock_bh(&priv->stats_lock); @@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev, tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); - if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size : - priv->rx_ring[0].size) && - tx_size == priv->tx_ring[0].size) + if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size : + priv->rx_ring[0]->size) && + tx_size == priv->tx_ring[0]->size) return 0; mutex_lock(&mdev->state_lock); @@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev, param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; param->rx_pending = priv->port_up ? - priv->rx_ring[0].actual_size : priv->rx_ring[0].size; - param->tx_pending = priv->tx_ring[0].size; + priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size; + param->tx_pending = priv->tx_ring[0]->size; } static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index a071cda2dd0..0d087b03a7b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) mdev->port_cnt++; + /* Initialize time stamp mechanism */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_init_timestamp(mdev); + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { if (!dev->caps.comp_pool) { mdev->profile.prof[i].rx_ring_num = @@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mdev->pndev[i] = NULL; } - /* Initialize time stamp mechanism */ - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) - mlx4_en_init_timestamp(mdev); - return mdev; err_mr: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index fa37b7a6121..e72d8a112a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi) struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; int done; if (!priv->port_up) @@ -102,6 +102,7 @@ struct mlx4_en_filter { struct list_head next; struct work_struct work; + u8 ip_proto; __be32 src_ip; __be32 dst_ip; __be16 src_port; @@ -120,14 +121,26 @@ struct mlx4_en_filter { static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); +static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) +{ + switch (ip_proto) { + case IPPROTO_UDP: + return MLX4_NET_TRANS_RULE_ID_UDP; + case IPPROTO_TCP: + return MLX4_NET_TRANS_RULE_ID_TCP; + default: + return -EPROTONOSUPPORT; + } +}; + static void mlx4_en_filter_work(struct work_struct *work) { struct mlx4_en_filter *filter = container_of(work, struct mlx4_en_filter, work); struct mlx4_en_priv *priv = filter->priv; - struct mlx4_spec_list spec_tcp = { - .id = 
MLX4_NET_TRANS_RULE_ID_TCP, + struct mlx4_spec_list spec_tcp_udp = { + .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), { .tcp_udp = { .dst_port = filter->dst_port, @@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work) int rc; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + if (spec_tcp_udp.id < 0) { + en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", + filter->ip_proto); + goto ignore; + } list_add_tail(&spec_eth.list, &rule.list); list_add_tail(&spec_ip.list, &rule.list); - list_add_tail(&spec_tcp.list, &rule.list); + list_add_tail(&spec_tcp_udp.list, &rule.list); rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); @@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work) if (rc) en_err(priv, "Error attaching flow. err = %d\n", rc); +ignore: mlx4_en_filter_rfs_expire(priv); filter->activated = 1; @@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, static struct mlx4_en_filter * mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, - __be32 dst_ip, __be16 src_port, __be16 dst_port, - u32 flow_id) + __be32 dst_ip, u8 ip_proto, __be16 src_port, + __be16 dst_port, u32 flow_id) { struct mlx4_en_filter *filter = NULL; @@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, filter->src_ip = src_ip; filter->dst_ip = dst_ip; + filter->ip_proto = ip_proto; filter->src_port = src_port; filter->dst_port = dst_port; @@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter) static inline struct mlx4_en_filter * mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, - __be16 src_port, __be16 dst_port) + u8 ip_proto, __be16 src_port, __be16 dst_port) { struct mlx4_en_filter *filter; struct mlx4_en_filter *ret = NULL; @@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, filter_chain) { if (filter->src_ip == src_ip && filter->dst_ip == dst_ip && + filter->ip_proto == ip_proto && filter->src_port == src_port && filter->dst_port == dst_port) { ret = filter; @@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, struct mlx4_en_filter *filter; const struct iphdr *ip; const __be16 *ports; + u8 ip_proto; __be32 src_ip; __be32 dst_ip; __be16 src_port; @@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, if (ip_is_fragment(ip)) return -EPROTONOSUPPORT; + if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) + return -EPROTONOSUPPORT; ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + ip_proto = ip->protocol; src_ip = ip->saddr; dst_ip = ip->daddr; src_port = ports[0]; dst_port = ports[1]; - if (ip->protocol != IPPROTO_TCP) - return -EPROTONOSUPPORT; - spin_lock_bh(&priv->filters_lock); - filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port); + filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, + src_port, dst_port); if (filter) { if (filter->rxq_index == rxq_index) goto out; @@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, filter->rxq_index = rxq_index; } else { filter = mlx4_en_filter_alloc(priv, rxq_index, - src_ip, dst_ip, + src_ip, dst_ip, ip_proto, src_port, dst_port, flow_id); if (!filter) { ret = -ENOMEM; @@ -332,8 +355,7 @@ err: return ret; } -void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, - 
struct mlx4_en_rx_ring *rx_ring) +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) { struct mlx4_en_filter *filter, *tmp; LIST_HEAD(del_list); @@ -417,7 +439,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int err; - int idx; en_dbg(HW, priv, "Killing VID:%d\n", vid); @@ -425,10 +446,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, /* Remove VID from port VLAN filter */ mutex_lock(&mdev->state_lock); - if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) - mlx4_unregister_vlan(mdev->dev, priv->port, idx); - else - en_dbg(HW, priv, "could not find vid %d in cache\n", vid); + mlx4_unregister_vlan(mdev->dev, priv->port, vid); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); @@ -1223,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev) int i; for (i = 0; i < priv->rx_ring_num; i++) { - cq = &priv->rx_cq[i]; + cq = priv->rx_cq[i]; spin_lock_irqsave(&cq->lock, flags); napi_synchronize(&cq->napi); mlx4_en_process_rx_cq(dev, cq, 0); @@ -1245,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev) if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) continue; en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", - i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn, - priv->tx_ring[i].cons, priv->tx_ring[i].prod); + i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, + priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); } priv->port_stats.tx_timeout++; @@ -1286,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) /* Setup cq moderation params */ for (i = 0; i < priv->rx_ring_num; i++) { - cq = &priv->rx_cq[i]; + cq = priv->rx_cq[i]; cq->moder_cnt = priv->rx_frames; cq->moder_time = priv->rx_usecs; priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; @@ -1295,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) } for (i = 0; i < priv->tx_ring_num; i++) { - cq = &priv->tx_cq[i]; + cq = priv->tx_cq[i]; cq->moder_cnt = priv->tx_frames; cq->moder_time = priv->tx_usecs; } @@ -1329,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) for (ring = 0; ring < priv->rx_ring_num; ring++) { spin_lock_bh(&priv->stats_lock); - rx_packets = priv->rx_ring[ring].packets; - rx_bytes = priv->rx_ring[ring].bytes; + rx_packets = priv->rx_ring[ring]->packets; + rx_bytes = priv->rx_ring[ring]->bytes; spin_unlock_bh(&priv->stats_lock); rx_pkt_diff = ((unsigned long) (rx_packets - @@ -1359,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) if (moder_time != priv->last_moder_time[ring]) { priv->last_moder_time[ring] = moder_time; - cq = &priv->rx_cq[ring]; + cq = priv->rx_cq[ring]; cq->moder_time = moder_time; cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); @@ -1482,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev) return err; } for (i = 0; i < priv->rx_ring_num; i++) { - cq = &priv->rx_cq[i]; + cq = priv->rx_cq[i]; mlx4_en_cq_init_lock(cq); @@ -1500,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev) goto cq_err; } mlx4_en_arm_cq(priv, cq); - priv->rx_ring[i].cqn = cq->mcq.cqn; + priv->rx_ring[i]->cqn = cq->mcq.cqn; ++rx_index; } @@ -1526,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev) /* Configure tx cq's and rings */ for (i = 0; i < priv->tx_ring_num; i++) { /* Configure cq */ - cq = &priv->tx_cq[i]; + cq = priv->tx_cq[i]; err = mlx4_en_activate_cq(priv, cq, 
i); if (err) { en_err(priv, "Failed allocating Tx CQ\n"); @@ -1542,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev) cq->buf->wqe_index = cpu_to_be16(0xffff); /* Configure ring */ - tx_ring = &priv->tx_ring[i]; + tx_ring = priv->tx_ring[i]; err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, i / priv->num_tx_rings_p_up); if (err) { @@ -1612,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev) tx_err: while (tx_index--) { - mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); - mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); } mlx4_en_destroy_drop_qp(priv); rss_err: @@ -1622,9 +1640,9 @@ mac_err: mlx4_en_put_qp(priv); cq_err: while (rx_index--) - mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); + mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); for (i = 0; i < priv->rx_ring_num; i++) - mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); return err; /* need to close devices */ } @@ -1720,25 +1738,25 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { - mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); - mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]); + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); } msleep(10); for (i = 0; i < priv->tx_ring_num; i++) - mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]); + mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); /* Free RSS qps */ mlx4_en_release_rss_steer(priv); /* Unregister Mac address for the port */ mlx4_en_put_qp(priv); - if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) mdev->mac_removed[priv->port] = 1; /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { - struct mlx4_en_cq *cq = &priv->rx_cq[i]; + struct mlx4_en_cq *cq = priv->rx_cq[i]; local_bh_disable(); while (!mlx4_en_cq_lock_napi(cq)) { @@ -1749,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) msleep(1); - mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); } } @@ -1787,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev) memset(&priv->port_stats, 0, sizeof(priv->port_stats)); for (i = 0; i < priv->tx_ring_num; i++) { - priv->tx_ring[i].bytes = 0; - priv->tx_ring[i].packets = 0; - priv->tx_ring[i].tx_csum = 0; + priv->tx_ring[i]->bytes = 0; + priv->tx_ring[i]->packets = 0; + priv->tx_ring[i]->tx_csum = 0; } for (i = 0; i < priv->rx_ring_num; i++) { - priv->rx_ring[i].bytes = 0; - priv->rx_ring[i].packets = 0; - priv->rx_ring[i].csum_ok = 0; - priv->rx_ring[i].csum_none = 0; + priv->rx_ring[i]->bytes = 0; + priv->rx_ring[i]->packets = 0; + priv->rx_ring[i]->csum_ok = 0; + priv->rx_ring[i]->csum_none = 0; } } @@ -1852,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) #endif for (i = 0; i < priv->tx_ring_num; i++) { - if (priv->tx_ring[i].tx_info) + if (priv->tx_ring && priv->tx_ring[i]) mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); - if (priv->tx_cq[i].buf) + if (priv->tx_cq && priv->tx_cq[i]) mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); } for (i = 0; i < priv->rx_ring_num; i++) { - if (priv->rx_ring[i].rx_info) + if (priv->rx_ring[i]) mlx4_en_destroy_rx_ring(priv, 
&priv->rx_ring[i], priv->prof->rx_ring_size, priv->stride); - if (priv->rx_cq[i].buf) + if (priv->rx_cq[i]) mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); } @@ -1877,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) struct mlx4_en_port_profile *prof = priv->prof; int i; int err; + int node; err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn); if (err) { @@ -1886,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) /* Create tx Rings */ for (i = 0; i < priv->tx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); if (mlx4_en_create_cq(priv, &priv->tx_cq[i], - prof->tx_ring_size, i, TX)) + prof->tx_ring_size, i, TX, node)) goto err; if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i, - prof->tx_ring_size, TXBB_SIZE)) + prof->tx_ring_size, TXBB_SIZE, node)) goto err; } /* Create rx Rings */ for (i = 0; i < priv->rx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); if (mlx4_en_create_cq(priv, &priv->rx_cq[i], - prof->rx_ring_size, i, RX)) + prof->rx_ring_size, i, RX, node)) goto err; if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], - prof->rx_ring_size, priv->stride)) + prof->rx_ring_size, priv->stride, + node)) goto err; } @@ -1918,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) err: en_err(priv, "Failed to allocate NIC resources\n"); + for (i = 0; i < priv->rx_ring_num; i++) { + if (priv->rx_ring[i]) + mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], + prof->rx_ring_size, + priv->stride); + if (priv->rx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); + } + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring[i]) + mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); + if (priv->tx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + } return -ENOMEM; } @@ -2211,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; - priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS, + priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_ring) { err = -ENOMEM; goto out; } - priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS, + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_cq) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 331791467a2..dae1a1f4ae5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -56,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) return PTR_ERR(mailbox); filter = mailbox->buf; - memset(filter, 0, sizeof(*filter)); for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { entry = 0; for (j = 0; j < 32; j++) @@ -81,7 +80,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - memset(mailbox->buf, 0, sizeof(*qport_context)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); @@ -127,7 +125,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - memset(mailbox->buf, 0, sizeof(*mlx4_en_stats)); err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, MLX4_CMD_DUMP_ETH_STATS, 
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); @@ -143,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_good = 0; priv->port_stats.rx_chksum_none = 0; for (i = 0; i < priv->rx_ring_num; i++) { - stats->rx_packets += priv->rx_ring[i].packets; - stats->rx_bytes += priv->rx_ring[i].bytes; - priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok; - priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none; + stats->rx_packets += priv->rx_ring[i]->packets; + stats->rx_bytes += priv->rx_ring[i]->bytes; + priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; + priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; } stats->tx_packets = 0; stats->tx_bytes = 0; priv->port_stats.tx_chksum_offload = 0; for (i = 0; i < priv->tx_ring_num; i++) { - stats->tx_packets += priv->tx_ring[i].packets; - stats->tx_bytes += priv->tx_ring[i].bytes; - priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum; + stats->tx_packets += priv->tx_ring[i]->packets; + stats->tx_bytes += priv->tx_ring[i]->bytes; + priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index dec455c8f62..07a1d0fbae4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, put_page(page); return -ENOMEM; } - page_alloc->size = PAGE_SIZE << order; + page_alloc->page_size = PAGE_SIZE << order; page_alloc->page = page; page_alloc->dma = dma; - page_alloc->offset = frag_info->frag_align; + page_alloc->page_offset = frag_info->frag_align; /* Not doing get_page() for each frag is a big win * on asymetric workloads. 
*/ - atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); + atomic_set(&page->_count, + page_alloc->page_size / frag_info->frag_stride); return 0; } @@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++) { frag_info = &priv->frag_info[i]; page_alloc[i] = ring_alloc[i]; - page_alloc[i].offset += frag_info->frag_stride; - if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) + page_alloc[i].page_offset += frag_info->frag_stride; + + if (page_alloc[i].page_offset + frag_info->frag_stride <= + ring_alloc[i].page_size) continue; + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) goto out; } for (i = 0; i < priv->num_frags; i++) { frags[i] = ring_alloc[i]; - dma = ring_alloc[i].dma + ring_alloc[i].offset; + dma = ring_alloc[i].dma + ring_alloc[i].page_offset; ring_alloc[i] = page_alloc[i]; rx_desc->data[i].addr = cpu_to_be64(dma); } @@ -117,7 +121,7 @@ out: frag_info = &priv->frag_info[i]; if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].size, PCI_DMA_FROMDEVICE); + page_alloc[i].page_size, PCI_DMA_FROMDEVICE); page = page_alloc[i].page; atomic_set(&page->_count, 1); put_page(page); @@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, int i) { const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; - if (frags[i].offset + frag_info->frag_stride > frags[i].size) - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, - PCI_DMA_FROMDEVICE); + + if (next_frag_end > frags[i].page_size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, + PCI_DMA_FROMDEVICE); if (frags[i].page) put_page(frags[i].page); @@ -161,7 +167,7 @@ out: page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, PCI_DMA_FROMDEVICE); page = page_alloc->page; atomic_set(&page->_count, 1); put_page(page); @@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); - while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { + page_alloc->page_size, PCI_DMA_FROMDEVICE); + while (page_alloc->page_offset + frag_info->frag_stride < + page_alloc->page_size) { put_page(page_alloc->page); - page_alloc->offset += frag_info->frag_stride; + page_alloc->page_offset += frag_info->frag_stride; } page_alloc->page = NULL; } @@ -257,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size, @@ -282,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) reduce_rings: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; while (ring->actual_size > new_size) { ring->actual_size--; ring->prod--; @@ -312,12 +319,23 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, } int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, u32 size, u16 stride) + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node) { struct 
mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring; int err = -ENOMEM; int tmp; + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed to allocate RX ring structure\n"); + return -ENOMEM; + } + } + ring->prod = 0; ring->cons = 0; ring->size = size; @@ -328,17 +346,25 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); - ring->rx_info = vmalloc(tmp); - if (!ring->rx_info) - return -ENOMEM; + ring->rx_info = vmalloc_node(tmp, node); + if (!ring->rx_info) { + ring->rx_info = vmalloc(tmp); + if (!ring->rx_info) { + err = -ENOMEM; + goto err_ring; + } + } en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", ring->rx_info, tmp); + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); if (err) - goto err_ring; + goto err_info; err = mlx4_en_map_buffer(&ring->wqres.buf); if (err) { @@ -349,13 +375,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; + *pring = ring; return 0; err_hwq: mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); -err_ring: +err_info: vfree(ring->rx_info); ring->rx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; + return err; } @@ -369,12 +400,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) DS_SIZE * priv->num_frags); for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; ring->prod = 0; ring->cons = 0; ring->actual_size = 0; - ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; ring->stride = stride; if (ring->stride <= TXBB_SIZE) @@ -405,7 +436,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) goto err_buffers; for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; ring->size_mask = ring->actual_size - 1; mlx4_en_update_rx_prod_db(ring); @@ -415,30 +446,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) err_buffers: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) - mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); + mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); ring_ind = priv->rx_ring_num - 1; err_allocator: while (ring_ind >= 0) { - if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE) - priv->rx_ring[ring_ind].buf -= TXBB_SIZE; - mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); + if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE) + priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; + mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]); ring_ind--; } return err; } void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, u32 size, u16 stride) + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring = *pring; mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; + kfree(ring); + *pring = NULL; #ifdef CONFIG_RFS_ACCEL - mlx4_en_cleanup_filters(priv, ring); + mlx4_en_cleanup_filters(priv); #endif } @@ -478,7 +513,7 @@ static int 
mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, /* Save page reference in skb */ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); - skb_frags_rx[nr].page_offset = frags[nr].offset; + skb_frags_rx[nr].page_offset = frags[nr].page_offset; skb->truesize += frag_info->frag_stride; frags[nr].page = NULL; } @@ -517,7 +552,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, /* Get pointer to first fragment so we could copy the headers into the * (linear part of the) skb */ - va = page_address(frags[0].page) + frags[0].offset; + va = page_address(frags[0].page) + frags[0].page_offset; if (length <= SMALL_PACKET_SIZE) { /* We are copying all relevant data to the skb - temporarily @@ -585,7 +620,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_cqe *cqe; - struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; struct mlx4_en_rx_desc *rx_desc; struct sk_buff *skb; @@ -645,7 +680,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), DMA_FROM_DEVICE); ethh = (struct ethhdr *)(page_address(frags[0].page) + - frags[0].offset); + frags[0].page_offset); if (is_multicast_ether_addr(ethh->h_dest)) { struct mlx4_mac_entry *entry; @@ -984,7 +1019,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) for (i = 0; i < priv->rx_ring_num; i++) { qpn = rss_map->base_qpn + i; - err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i], + err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i], &rss_map->state[i], &rss_map->qps[i]); if (err) @@ -1001,7 +1036,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) } rss_map->indir_qp.event = mlx4_en_sqp_event; mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, - priv->rx_ring[0].cqn, -1, &context); + priv->rx_ring[0]->cqn, -1, &context); if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) rss_rings = priv->rx_ring_num; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2448f0d669e..c11d063473e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - struct mlx4_en_tx_ring *tx_ring; int i, carrier_ok; memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); @@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) carrier_ok = netif_carrier_ok(dev); netif_carrier_off(dev); -retry_tx: /* Wait until all tx queues are empty. 
* there should not be any additional incoming traffic * since we turned the carrier off */ msleep(200); - for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { - tx_ring = &priv->tx_ring[i]; - if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) - goto retry_tx; - } if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0698c82d6ff..f54ebd5a170 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -54,13 +54,23 @@ module_param_named(inline_thold, inline_thold, int, 0444); MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring *ring, int qpn, u32 size, - u16 stride) + struct mlx4_en_tx_ring **pring, int qpn, u32 size, + u16 stride, int node) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; int tmp; int err; + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed allocating TX ring\n"); + return -ENOMEM; + } + } + ring->size = size; ring->size_mask = size - 1; ring->stride = stride; @@ -68,22 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, inline_thold = min(inline_thold, MAX_INLINE); tmp = size * sizeof(struct mlx4_en_tx_info); - ring->tx_info = vmalloc(tmp); - if (!ring->tx_info) - return -ENOMEM; + ring->tx_info = vmalloc_node(tmp, node); + if (!ring->tx_info) { + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + err = -ENOMEM; + goto err_ring; + } + } en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", ring->tx_info, tmp); - ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node); if (!ring->bounce_buf) { - err = -ENOMEM; - goto err_tx; + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + err = -ENOMEM; + goto err_info; + } } ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_bounce; @@ -109,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, } ring->qp.event = mlx4_en_sqp_event; - err = mlx4_bf_alloc(mdev->dev, &ring->bf); + err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); if (err) { en_dbg(DRV, priv, "working without blueflame (%d)", err); ring->bf.uar = &mdev->priv_uar; @@ -120,6 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; + *pring = ring; return 0; err_map: @@ -129,16 +151,20 @@ err_hwq_res: err_bounce: kfree(ring->bounce_buf); ring->bounce_buf = NULL; -err_tx: +err_info: vfree(ring->tx_info); ring->tx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; return err; } void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring *ring) + struct mlx4_en_tx_ring **pring) { struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring = *pring; en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); if (ring->bf_enabled) @@ -151,6 +177,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, ring->bounce_buf = NULL; 
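/* The TX ring constructor above, like the RX ring and CQ constructors
 * elsewhere in this series, follows one NUMA pattern: try a node-local
 * allocation first and fall back to an unconstrained one rather than
 * failing outright, then temporarily retarget the PCI device's node so
 * that mlx4_alloc_hwq_res() places the HW queue memory on the node
 * that will service the ring. A condensed sketch of that pattern,
 * reusing the names from the hunk above (error handling trimmed):
 */
	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring)
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Steer the hwq/DMA allocation to the same node, then restore
	 * the device's default node.
	 */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);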
vfree(ring->tx_info); ring->tx_info = NULL; + kfree(ring); + *pring = NULL; } int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, @@ -330,7 +358,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; - struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; struct mlx4_cqe *cqe; u16 index; u16 new_index, ring_index, stamp_index; @@ -622,7 +650,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) } tx_ind = skb->queue_mapping; - ring = &priv->tx_ring[tx_ind]; + ring = priv->tx_ring[tx_ind]; if (vlan_tx_tag_present(skb)) vlan_tag = vlan_tx_tag_get(skb); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 0416c5b3b35..c9cdb2a2c59 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -936,7 +936,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, if (err) goto err_out_free_mtt; - memset(eq_context, 0, sizeof *eq_context); eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED); eq_context->log_eq_size = ilog2(eq->nent); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 0d63daa2f42..19492821460 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -159,8 +159,6 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) return PTR_ERR(mailbox); inbox = mailbox->buf; - memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); - MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); @@ -177,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + struct mlx4_priv *priv = mlx4_priv(dev); u8 field; u32 size; int err = 0; @@ -185,18 +184,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 -#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 -#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 -#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 -#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 -#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 -#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 + #define QUERY_FUNC_CAP_FMR_FLAG 0x80 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 +#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 /* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 @@ -237,8 +244,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); } else if (vhcr->op_modifier 
== 0) { - /* enable rdma and ethernet interfaces */ - field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); + /* enable rdma and ethernet interfaces, and new quota locations */ + field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | + QUERY_FUNC_CAP_FLAG_QUOTAS); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); field = dev->caps.num_ports; @@ -250,14 +258,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, field = 0; /* protected FMR support not available as yet */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); - size = dev->caps.num_qps; + size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + size = dev->caps.num_qps; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); - size = dev->caps.num_srqs; + size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + size = dev->caps.num_srqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); - size = dev->caps.num_cqs; + size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + size = dev->caps.num_cqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); size = dev->caps.num_eqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); @@ -265,14 +279,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, size = dev->caps.reserved_eqs; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); - size = dev->caps.num_mpts; + size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + size = dev->caps.num_mpts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); - size = dev->caps.num_mtts; + size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + size = dev->caps.num_mtts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); size = dev->caps.num_mgms + dev->caps.num_amgms; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); } else err = -EINVAL; @@ -287,7 +306,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, u32 *outbox; u8 field, op_modifier; u32 size; - int err = 0; + int err = 0, quotas = 0; op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ @@ -311,6 +330,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, goto out; } func_cap->flags = field; + quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); func_cap->num_ports = field; @@ -318,29 +338,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); func_cap->pf_context_behaviour = size; - MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); - func_cap->qp_quota = size & 0xFFFFFF; + if (quotas) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + func_cap->qp_quota = size & 0xFFFFFF; - MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); - func_cap->srq_quota = size & 0xFFFFFF; + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + func_cap->srq_quota = size & 0xFFFFFF; - MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); - func_cap->cq_quota = size & 0xFFFFFF; + MLX4_GET(size, outbox, 
QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + func_cap->cq_quota = size & 0xFFFFFF; + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + func_cap->mcg_quota = size & 0xFFFFFF; + + } else { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + func_cap->mcg_quota = size & 0xFFFFFF; + } MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); func_cap->max_eq = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); func_cap->reserved_eq = size & 0xFFFFFF; - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); - func_cap->mpt_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); - func_cap->mtt_quota = size & 0xFFFFFF; - - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); - func_cap->mcg_quota = size & 0xFFFFFF; goto out; } @@ -652,7 +693,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_RSVD_LKEY_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) - dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN; + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) @@ -924,7 +965,6 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); pages = mailbox->buf; for (mlx4_icm_first(icm, &iter); @@ -1273,8 +1313,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) return PTR_ERR(mailbox); inbox = mailbox->buf; - memset(inbox, 0, INIT_HCA_IN_SIZE); - *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = @@ -1573,8 +1611,6 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) return PTR_ERR(mailbox); inbox = mailbox->buf; - memset(inbox, 0, INIT_PORT_IN_SIZE); - flags = 0; flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; @@ -1713,7 +1749,6 @@ void mlx4_opreq_action(struct work_struct *work) u32 *outbox; u32 modifier; u16 token; - u16 type_m; u16 type; int err; u32 num_qps; @@ -1739,14 +1774,13 @@ void mlx4_opreq_action(struct work_struct *work) MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) { - mlx4_err(dev, "Failed to retreive required operation: %d\n", + mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); return; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); - type_m = type >> 12; type &= 0xfff; switch (type) { diff --git 
a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 31d02649be4..5fbf4924c27 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) kfree(icm); } -static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) +static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, + gfp_t gfp_mask, int node) { struct page *page; - page = alloc_pages(gfp_mask, order); - if (!page) - return -ENOMEM; + page = alloc_pages_node(node, gfp_mask, order); + if (!page) { + page = alloc_pages(gfp_mask, order); + if (!page) + return -ENOMEM; + } sg_set_page(mem, page, PAGE_SIZE << order, 0); return 0; @@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, /* We use sg_set_buf for coherent allocs, which assumes low memory */ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); - icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); - if (!icm) - return NULL; + icm = kmalloc_node(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), + dev->numa_node); + if (!icm) { + icm = kmalloc(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!icm) + return NULL; + } icm->refcount = 0; INIT_LIST_HEAD(&icm->chunk_list); @@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (npages > 0) { if (!chunk) { - chunk = kmalloc(sizeof *chunk, - gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); - if (!chunk) - goto fail; + chunk = kmalloc_node(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN), + dev->numa_node); + if (!chunk) { + chunk = kmalloc(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN)); + if (!chunk) + goto fail; + } sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); chunk->npages = 0; @@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, cur_order, gfp_mask); else ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], - cur_order, gfp_mask); + cur_order, gfp_mask, + dev->numa_node); if (ret) { if (--cur_order < 0) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 60c9f4f103f..01fc6515384 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -42,6 +42,7 @@ #include <linux/io-mapping.h> #include <linux/delay.h> #include <linux/netdevice.h> +#include <linux/kmod.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -561,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) } dev->caps.num_ports = func_cap.num_ports; - dev->caps.num_qps = func_cap.qp_quota; - dev->caps.num_srqs = func_cap.srq_quota; - dev->caps.num_cqs = func_cap.cq_quota; - dev->caps.num_eqs = func_cap.max_eq; - dev->caps.reserved_eqs = func_cap.reserved_eq; - dev->caps.num_mpts = func_cap.mpt_quota; - dev->caps.num_mtts = func_cap.mtt_quota; + dev->quotas.qp = func_cap.qp_quota; + dev->quotas.srq = func_cap.srq_quota; + dev->quotas.cq = func_cap.cq_quota; + dev->quotas.mpt = func_cap.mpt_quota; + dev->quotas.mtt = func_cap.mtt_quota; + dev->caps.num_qps = 1 << hca_param.log_num_qps; + dev->caps.num_srqs = 1 << hca_param.log_num_srqs; + dev->caps.num_cqs = 1 << hca_param.log_num_cqs; + dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; + dev->caps.num_eqs = func_cap.max_eq; + dev->caps.reserved_eqs = func_cap.reserved_eq; dev->caps.num_pds = MLX4_NUM_PDS; dev->caps.num_mgms = 
0; dev->caps.num_amgms = 0; @@ -650,6 +655,27 @@ err_mem: return err; } +static void mlx4_request_modules(struct mlx4_dev *dev) +{ + int port; + int has_ib_port = false; + int has_eth_port = false; +#define EN_DRV_NAME "mlx4_en" +#define IB_DRV_NAME "mlx4_ib" + + for (port = 1; port <= dev->caps.num_ports; port++) { + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) + has_ib_port = true; + else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + has_eth_port = true; + } + + if (has_ib_port) + request_module_nowait(IB_DRV_NAME); + if (has_eth_port) + request_module_nowait(EN_DRV_NAME); +} + /* * Change the port configuration of the device. * Every user of this function must hold the port mutex. @@ -681,6 +707,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev, } mlx4_set_port_mask(dev); err = mlx4_register_device(dev); + if (err) { + mlx4_err(dev, "Failed to register device\n"); + goto out; + } + mlx4_request_modules(dev); } out: @@ -2075,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) "aborting.\n"); return err; } - if (num_vfs > MLX4_MAX_NUM_VF) { - printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", - num_vfs, MLX4_MAX_NUM_VF); + + /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACs + * per port, we must limit the number of VFs to 63 (since there are + * 128 MACs) + */ + if (num_vfs >= MLX4_MAX_NUM_VF) { + dev_err(&pdev->dev, + "Requested more VF's (%d) than allowed (%d)\n", + num_vfs, MLX4_MAX_NUM_VF - 1); return -EINVAL; } @@ -2154,6 +2191,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) mutex_init(&priv->bf_mutex); dev->rev_id = pdev->revision; + dev->numa_node = dev_to_node(&pdev->dev); /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { /* When acting as pf, we normally skip vfs unless explicitly @@ -2295,6 +2333,8 @@ slave_start: if (err) goto err_steer; + mlx4_init_quotas(dev); + for (port = 1; port <= dev->caps.num_ports; port++) { err = mlx4_init_port_info(dev, port); if (err) @@ -2305,6 +2345,8 @@ slave_start: if (err) goto err_port; + mlx4_request_modules(dev); + mlx4_sense_init(dev); mlx4_start_sense(dev); @@ -2593,6 +2635,8 @@ static int __init mlx4_init(void) return -ENOMEM; ret = pci_register_driver(&mlx4_driver); + if (ret < 0) + destroy_workqueue(mlx4_wq); return ret < 0 ? ret : 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 55f6245efb6..acf9d5f1f92 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -506,7 +506,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, goto out_list; } mgm = mailbox->buf; - memset(mgm, 0, sizeof *mgm); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); @@ -645,7 +644,7 @@ static const u8 __promisc_mode[] = { int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, enum mlx4_net_trans_promisc_mode flow_type) { - if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { + if (flow_type >= MLX4_FS_MODE_NUM) { mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); return -EINVAL; } @@ -681,7 +680,7 @@ const u16 __sw_id_hw[] = { int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id) { - if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + if (id >= MLX4_NET_TRANS_RULE_NUM) { mlx4_err(dev, "Invalid network rule id. 
id = %d\n", id); return -EINVAL; } @@ -706,7 +705,7 @@ static const int __rule_hw_sz[] = { int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id) { - if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + if (id >= MLX4_NET_TRANS_RULE_NUM) { mlx4_err(dev, "Invalid network rule id. id = %d\n", id); return -EINVAL; } @@ -857,7 +856,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 348bb8c7d9a..e582a41a802 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -455,6 +455,7 @@ struct mlx4_slave_state { u8 last_cmd; u8 init_port_mask; bool active; + bool old_vlan_api; u8 function; dma_addr_t vhcr_dma; u16 mtu[MLX4_MAX_PORTS + 1]; @@ -503,12 +504,28 @@ struct slave_list { struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; }; +struct resource_allocator { + spinlock_t alloc_lock; /* protect quotas */ + union { + int res_reserved; + int res_port_rsvd[MLX4_MAX_PORTS]; + }; + union { + int res_free; + int res_port_free[MLX4_MAX_PORTS]; + }; + int *quota; + int *allocated; + int *guaranteed; +}; + struct mlx4_resource_tracker { spinlock_t lock; /* tree for each resources */ struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; /* num_of_slave's lists, one per slave */ struct slave_list *slave_list; + struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE]; }; #define SLAVE_EVENT_EQ_SIZE 128 @@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); -void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); @@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); +void mlx4_init_quotas(struct mlx4_dev *dev); + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5e0aa569306..f3758de59c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -237,8 +237,8 @@ struct mlx4_en_tx_desc { struct mlx4_en_rx_alloc { struct page *page; dma_addr_t dma; - u32 offset; - u32 size; + u32 page_offset; + u32 page_size; }; struct mlx4_en_tx_ring { @@ -530,10 +530,10 @@ struct mlx4_en_priv { u16 num_frags; u16 log_rx_info; - struct mlx4_en_tx_ring *tx_ring; - struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; - struct mlx4_en_cq *tx_cq; - struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; + struct mlx4_en_tx_ring **tx_ring; + struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; + struct mlx4_en_cq **tx_cq; + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; struct work_struct watchdog_task; @@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) if ((cq->state & MLX4_CQ_LOCKED)) { struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); - struct 
mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; rc = false; @@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach); void mlx4_en_free_resources(struct mlx4_en_priv *priv); int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); -int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, - int entries, int ring, enum cq_type mode); -void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, int node); +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq); int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int cq_idx); void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); @@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); -int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int qpn, u32 size, u16 stride); -void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); +int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring, + int qpn, u32 size, u16 stride, int node); +void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring); int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int cq, int user_prio); @@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, - u32 size, u16 stride); + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node); void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_ring **pring, u32 size, u16 stride); int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, @@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops; int mlx4_en_setup_tc(struct net_device *dev, u8 up); #ifdef CONFIG_RFS_ACCEL -void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *rx_ring); +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); #endif #define MLX4_EN_NUM_SELF_TEST 5 diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index f91719a08cb..b3ee9bafff5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -480,9 +480,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) goto err_table; } mpt_entry = mailbox->buf; - - memset(mpt_entry, 0, sizeof *mpt_entry); - mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | MLX4_MPT_FLAG_REGION | mr->access); @@ -695,8 +692,6 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) } mpt_entry = mailbox->buf; - memset(mpt_entry, 0, sizeof(*mpt_entry)); - /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned * off, thus creating a memory window and not a memory region. 
*/ @@ -755,14 +750,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) struct mlx4_mr_table *mr_table = &priv->mr_table; int err; - if (!is_power_of_2(dev->caps.num_mpts)) - return -EINVAL; - /* Nothing to do for slaves - all MR handling is forwarded * to the master */ if (mlx4_is_slave(dev)) return 0; + if (!is_power_of_2(dev->caps.num_mpts)) + return -EINVAL; + err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, ~0, dev->caps.reserved_mrws, 0); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 00f223acada..84cfb40bf45 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) } EXPORT_SYMBOL_GPL(mlx4_uar_free); -int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) +int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_uar *uar; @@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) err = -ENOMEM; goto out; } - uar = kmalloc(sizeof *uar, GFP_KERNEL); + uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node); if (!uar) { - err = -ENOMEM; - goto out; + uar = kmalloc(sizeof(*uar), GFP_KERNEL); + if (!uar) { + err = -ENOMEM; + goto out; + } } err = mlx4_uar_alloc(dev, uar); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 946e0af5fae..97d342fa503 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { u64 out_param = 0; - int err; + int err = -EINVAL; if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + err = mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } + if (err && err == -EINVAL && mlx4_is_slave(dev)) { + /* retry using old REG_MAC format */ + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + dev->flags |= MLX4_FLAG_OLD_REG_MAC; + } if (err) return err; @@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) u64 out_param = 0; if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, - RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + (void) mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } else { + /* use old unregister mac format */ + set_param_l(&out_param, port); + (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } return; } __mlx4_unregister_mac(dev, port, mac); @@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | 
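The mlx4_register_mac()/mlx4_unregister_mac() hunks above probe the new port-in-the-modifier command encoding first and, when an older master rejects it with -EINVAL, retry the legacy format and latch MLX4_FLAG_OLD_REG_MAC so later calls skip the failed probe. A compact sketch of that negotiate-once-and-remember pattern; try_new() and try_old() are illustrative stand-ins for the two mlx4_cmd_imm() encodings:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool peer_is_old = true;	/* pretend the master only speaks the old format */
static bool use_old_format;	/* latched after the first rejected probe */

static int try_new(void) { return peer_is_old ? -EINVAL : 0; }
static int try_old(void) { return 0; }

static int register_mac(void)
{
	int err = -EINVAL;

	if (!use_old_format)
		err = try_new();
	if (err == -EINVAL) {
		err = try_old();		/* retry with the legacy encoding */
		if (!err)
			use_old_format = true;	/* don't pay for the probe again */
	}
	return err;
}

int main(void)
{
	printf("first call: %d, old format latched: %d\n", register_mac(), use_old_format);
	printf("second call: %d (probe skipped)\n", register_mac());
	return 0;
}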
port; err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); mlx4_free_cmd_mailbox(dev, mailbox); @@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) u64 out_param = 0; int err; + if (vlan > 4095) + return -EINVAL; + if (mlx4_is_mfunc(dev)) { - set_param_l(&out_param, port); - err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, + err = mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) @@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) } EXPORT_SYMBOL_GPL(mlx4_register_vlan); -void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) { struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int index; - if (index < MLX4_VLAN_REGULAR) { - mlx4_warn(dev, "Trying to free special vlan index %d\n", index); - return; + mutex_lock(&table->mutex); + if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { + mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); + goto out; } - mutex_lock(&table->mutex); - if (!table->refs[index]) { - mlx4_warn(dev, "No vlan entry for index %d\n", index); + if (index < MLX4_VLAN_REGULAR) { + mlx4_warn(dev, "Trying to free special vlan index %d\n", index); goto out; } + if (--table->refs[index]) { - mlx4_dbg(dev, "Have more references for index %d," - "no need to modify vlan table\n", index); + mlx4_dbg(dev, "Have %d more references for index %d, " + "no need to modify vlan table\n", table->refs[index], + index); goto out; } table->entries[index] = 0; @@ -410,23 +435,19 @@ out: mutex_unlock(&table->mutex); } -void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) { - u64 in_param = 0; - int err; + u64 out_param = 0; if (mlx4_is_mfunc(dev)) { - set_param_l(&in_param, port); - err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, - MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_WRAPPED); - if (!err) - mlx4_warn(dev, "Failed freeing vlan at index:%d\n", - index); - + (void) mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); return; } - __mlx4_unregister_vlan(dev, port, index); + __mlx4_unregister_vlan(dev, port, vlan); } EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); @@ -448,8 +469,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) inbuf = inmailbox->buf; outbuf = outmailbox->buf; - memset(inbuf, 0, 256); - memset(outbuf, 0, 256); inbuf[0] = 1; inbuf[1] = 1; inbuf[2] = 1; @@ -632,8 +651,6 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - memset(mailbox->buf, 0, 256); - ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { @@ -671,8 +688,6 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - memset(context, 0, sizeof *context); - context->flags = SET_PORT_GEN_ALL_VALID; context->mtu = cpu_to_be16(mtu); context->pptx = (pptx * (!pfctx)) << 7; @@ -706,8 +721,6 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, if 
(IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - memset(context, 0, sizeof *context); - context->base_qpn = cpu_to_be32(base_qpn); context->n_mac = dev->caps.log_num_macs; context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | @@ -740,8 +753,6 @@ int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - memset(context, 0, sizeof *context); - for (i = 0; i < MLX4_NUM_UP; i += 2) context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; @@ -767,7 +778,6 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); context = mailbox->buf; - memset(context, 0, sizeof *context); for (i = 0; i < MLX4_NUM_TC; i++) { struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index e891b058c1b..2715e61dbb7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) */ err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, - (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 + - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev), + (1 << 23) - 1, mlx4_num_reserved_sqps(dev), reserved_from_top); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index dd687632111..2f3f2bc7f28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -55,6 +55,14 @@ struct mac_res { u8 port; }; +struct vlan_res { + struct list_head list; + u16 vlan; + int ref_count; + int vlan_index; + u8 port; +}; + struct res_common { struct list_head list; struct rb_node node; @@ -102,7 +110,14 @@ struct res_qp { int local_qpn; atomic_t ref_count; u32 qpc_flags; + /* saved qp params before VST enforcement in order to restore on VGT */ u8 sched_queue; + __be32 param3; + u8 vlan_control; + u8 fvl_rx; + u8 pri_path_fl; + u8 vlan_index; + u8 feup; }; enum res_mtt_states { @@ -266,6 +281,7 @@ static const char *ResourceType(enum mlx4_resource rt) case RES_MPT: return "RES_MPT"; case RES_MTT: return "RES_MTT"; case RES_MAC: return "RES_MAC"; + case RES_VLAN: return "RES_VLAN"; case RES_EQ: return "RES_EQ"; case RES_COUNTER: return "RES_COUNTER"; case RES_FS_RULE: return "RES_FS_RULE"; @@ -274,10 +290,139 @@ static const char *ResourceType(enum mlx4_resource rt) }; } +static void rem_slave_vlans(struct mlx4_dev *dev, int slave); +static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int err = -EINVAL; + int allocated, free, reserved, guaranteed, from_free; + + if (slave > dev->num_vfs) + return -EINVAL; + + spin_lock(&res_alloc->alloc_lock); + allocated = (port > 0) ? + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + free = (port > 0) ? res_alloc->res_port_free[port - 1] : + res_alloc->res_free; + reserved = (port > 0) ? 
res_alloc->res_port_rsvd[port - 1] : + res_alloc->res_reserved; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated + count > res_alloc->quota[slave]) + goto out; + + if (allocated + count <= guaranteed) { + err = 0; + } else { + /* portion may need to be obtained from free area */ + if (guaranteed - allocated > 0) + from_free = count - (guaranteed - allocated); + else + from_free = count; + + if (free - from_free > reserved) + err = 0; + } + + if (!err) { + /* grant the request */ + if (port > 0) { + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; + res_alloc->res_port_free[port - 1] -= count; + } else { + res_alloc->allocated[slave] += count; + res_alloc->res_free -= count; + } + } + +out: + spin_unlock(&res_alloc->alloc_lock); + return err; +} + +static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + + if (slave > dev->num_vfs) + return; + + spin_lock(&res_alloc->alloc_lock); + if (port > 0) { + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; + res_alloc->res_port_free[port - 1] += count; + } else { + res_alloc->allocated[slave] -= count; + res_alloc->res_free += count; + } + + spin_unlock(&res_alloc->alloc_lock); + return; +} + +static inline void initialize_res_quotas(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + enum mlx4_resource res_type, + int vf, int num_instances) +{ + res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); + res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; + if (vf == mlx4_master_func_num(dev)) { + res_alloc->res_free = num_instances; + if (res_type == RES_MTT) { + /* reserved mtts will be taken out of the PF allocation */ + res_alloc->res_free += dev->caps.reserved_mtts; + res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; + res_alloc->quota[vf] += dev->caps.reserved_mtts; + } + } +} + +void mlx4_init_quotas(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int pf; + + /* quotas for VFs are initialized in mlx4_slave_cap */ + if (mlx4_is_slave(dev)) + return; + + if (!mlx4_is_mfunc(dev)) { + dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev); + dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; + dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; + dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; + dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; + return; + } + + pf = mlx4_master_func_num(dev); + dev->quotas.qp = + priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; + dev->quotas.cq = + priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; + dev->quotas.srq = + priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; + dev->quotas.mtt = + priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; + dev->quotas.mpt = + priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; +} int mlx4_init_resource_tracker(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - int i; + int i, j; int t; priv->mfunc.master.res_tracker.slave_list = @@ -298,8 +443,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + struct resource_allocator 
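mlx4_grant_resource() above implements guaranteed-minimum admission control: a function may always consume up to its guarantee, may never exceed its quota, and any consumption in between must leave the shared free pool large enough to cover everyone else's reserved guarantees. A self-contained model of that check; the three functions and all numbers (quota 8, guarantee 2, pool of 20 with 6 reserved) are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NFUNC 3

static int quota[NFUNC]      = {8, 8, 8};
static int guaranteed[NFUNC] = {2, 2, 2};
static int allocated[NFUNC];
static int res_free = 20;	/* objects in the shared pool */
static int res_reserved = 6;	/* sum of all guarantees */

static bool grant(int f, int count)
{
	if (allocated[f] + count > quota[f])
		return false;			/* hard per-function cap */

	if (allocated[f] + count > guaranteed[f]) {
		/* the portion beyond the guarantee competes for the free area */
		int from_free = count - (guaranteed[f] > allocated[f] ?
					 guaranteed[f] - allocated[f] : 0);

		if (res_free - from_free <= res_reserved)
			return false;		/* would eat into others' guarantees */
	}
	allocated[f] += count;
	res_free -= count;
	return true;
}

int main(void)
{
	printf("f0 takes 2 (within guarantee):   %d\n", grant(0, 2));
	printf("f0 takes 6 more (up to quota):   %d\n", grant(0, 6));
	printf("f1 takes 7:                      %d\n", grant(1, 7));
	printf("f2 takes 7 (pool exhausted):     %d\n", grant(2, 7));
	printf("f2 takes 2 (guarantee still ok): %d\n", grant(2, 2));
	return 0;
}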
*res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[i]; + res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); + res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); + if (i == RES_MAC || i == RES_VLAN) + res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * + (dev->num_vfs + 1) * sizeof(int), + GFP_KERNEL); + else + res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); + + if (!res_alloc->quota || !res_alloc->guaranteed || + !res_alloc->allocated) + goto no_mem_err; + + spin_lock_init(&res_alloc->alloc_lock); + for (t = 0; t < dev->num_vfs + 1; t++) { + switch (i) { + case RES_QP: + initialize_res_quotas(dev, res_alloc, RES_QP, + t, dev->caps.num_qps - + dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev)); + break; + case RES_CQ: + initialize_res_quotas(dev, res_alloc, RES_CQ, + t, dev->caps.num_cqs - + dev->caps.reserved_cqs); + break; + case RES_SRQ: + initialize_res_quotas(dev, res_alloc, RES_SRQ, + t, dev->caps.num_srqs - + dev->caps.reserved_srqs); + break; + case RES_MPT: + initialize_res_quotas(dev, res_alloc, RES_MPT, + t, dev->caps.num_mpts - + dev->caps.reserved_mrws); + break; + case RES_MTT: + initialize_res_quotas(dev, res_alloc, RES_MTT, + t, dev->caps.num_mtts - + dev->caps.reserved_mtts); + break; + case RES_MAC: + if (t == mlx4_master_func_num(dev)) { + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + res_alloc->guaranteed[t] = 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; + } else { + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + res_alloc->guaranteed[t] = 2; + } + break; + case RES_VLAN: + if (t == mlx4_master_func_num(dev)) { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; + res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = + res_alloc->quota[t]; + } else { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; + res_alloc->guaranteed[t] = 0; + } + break; + case RES_COUNTER: + res_alloc->quota[t] = dev->caps.max_counters; + res_alloc->guaranteed[t] = 0; + if (t == mlx4_master_func_num(dev)) + res_alloc->res_free = res_alloc->quota[t]; + break; + default: + break; + } + if (i == RES_MAC || i == RES_VLAN) { + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; + } else { + res_alloc->res_reserved += res_alloc->guaranteed[t]; + } + } + } spin_lock_init(&priv->mfunc.master.res_tracker.lock); - return 0 ; + return 0; + +no_mem_err: + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } + return -ENOMEM; } void mlx4_free_resource_tracker(struct mlx4_dev *dev, @@ -309,13 +551,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev, int i; if (priv->mfunc.master.res_tracker.slave_list) { - if (type != RES_TR_FREE_STRUCTS_ONLY) - for (i = 0 ; i < dev->num_slaves; i++) + if (type != RES_TR_FREE_STRUCTS_ONLY) { + for (i = 0; i < dev->num_slaves; i++) { if (type == RES_TR_FREE_ALL || dev->caps.function != i) mlx4_delete_all_resources_for_slave(dev, i); + } + /* free master's vlans */ + i = dev->caps.function; + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + rem_slave_vlans(dev, 
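For per-port resources (MACs, VLANs) the tracker initialization above flattens a ports-by-functions matrix into the single allocated[] array, indexed as (port - 1) * (num_vfs + 1) + slave; non-ported resources just use allocated[slave]. A tiny model of that layout, with 2 ports and a PF plus 3 VFs (all values illustrative):

#include <stdio.h>

#define NUM_PORTS 2
#define NUM_VFS   3	/* functions = PF (slave 0) + 3 VFs */

static int allocated[NUM_PORTS * (NUM_VFS + 1)];

static int *slot(int port, int slave)	/* port is 1-based, as in mlx4 */
{
	return &allocated[(port - 1) * (NUM_VFS + 1) + slave];
}

int main(void)
{
	*slot(2, 1) += 5;	/* VF 1 takes 5 MACs on port 2 */
	printf("port 2 / slave 1 lands at index %d, count %d\n",
	       (2 - 1) * (NUM_VFS + 1) + 1, *slot(2, 1));
	return 0;
}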
i); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } if (type != RES_TR_FREE_SLAVES_ONLY) { + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } kfree(priv->mfunc.master.res_tracker.slave_list); priv->mfunc.master.res_tracker.slave_list = NULL; } @@ -1229,12 +1486,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, case RES_OP_RESERVE: count = get_param_l(&in_param); align = get_param_h(&in_param); - err = __mlx4_qp_reserve_range(dev, count, align, &base); + err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) return err; + err = __mlx4_qp_reserve_range(dev, count, align, &base); + if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); + return err; + } + err = add_res_range(dev, slave, base, count, RES_QP, 0); if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); __mlx4_qp_release_range(dev, base, count); return err; } @@ -1282,15 +1546,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; order = get_param_l(&in_param); + + err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); + if (err) + return err; + base = __mlx4_alloc_mtt_range(dev, order); - if (base == -1) + if (base == -1) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); return -ENOMEM; + } err = add_res_range(dev, slave, base, 1, RES_MTT, order); - if (err) + if (err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); __mlx4_free_mtt_range(dev, base, order); - else + } else { set_param_l(out_param, base); + } return err; } @@ -1305,13 +1578,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE: + err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); + if (err) + break; + index = __mlx4_mpt_reserve(dev); - if (index == -1) + if (index == -1) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); break; + } id = index & mpt_mask(dev); err = add_res_range(dev, slave, id, 1, RES_MPT, index); if (err) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); __mlx4_mpt_release(dev, index); break; } @@ -1345,12 +1625,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE_AND_MAP: - err = __mlx4_cq_alloc_icm(dev, &cqn); + err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); if (err) break; + err = __mlx4_cq_alloc_icm(dev, &cqn); + if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + break; + } + err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); __mlx4_cq_free_icm(dev, cqn); break; } @@ -1373,12 +1660,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE_AND_MAP: - err = __mlx4_srq_alloc_icm(dev, &srqn); + err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); if (err) break; + err = __mlx4_srq_alloc_icm(dev, &srqn); + if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + break; + } + err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); __mlx4_srq_free_icm(dev, srqn); break; } @@ 
-1399,9 +1693,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct mac_res *res; + if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) + return -EINVAL; res = kzalloc(sizeof *res, GFP_KERNEL); - if (!res) + if (!res) { + mlx4_release_resource(dev, slave, RES_MAC, 1, port); return -ENOMEM; + } res->mac = mac; res->port = (u8) port; list_add_tail(&res->list, @@ -1421,6 +1719,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, list_for_each_entry_safe(res, tmp, mac_list, list) { if (res->mac == mac && res->port == (u8) port) { list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); kfree(res); break; } @@ -1438,12 +1737,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave) list_for_each_entry_safe(res, tmp, mac_list, list) { list_del(&res->list); __mlx4_unregister_mac(dev, res->port, res->mac); + mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); kfree(res); } } static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) + u64 in_param, u64 *out_param, int in_port) { int err = -EINVAL; int port; @@ -1452,7 +1752,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (op != RES_OP_RESERVE_AND_MAP) return err; - port = get_param_l(out_param); + port = !in_port ? get_param_l(out_param) : in_port; mac = in_param; err = __mlx4_register_mac(dev, port, mac); @@ -1469,12 +1769,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; } -static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) +static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port, int vlan_index) { + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + /* vlan found. 
update ref count */ + ++res->ref_count; + return 0; + } + } + + if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) + return -EINVAL; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + mlx4_release_resource(dev, slave, RES_VLAN, 1, port); + return -ENOMEM; + } + res->vlan = vlan; + res->port = (u8) port; + res->vlan_index = vlan_index; + res->ref_count = 1; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_VLAN]); return 0; } + +static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_VLAN, + 1, port); + kfree(res); + } + break; + } + } +} + +static void rem_slave_vlans(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + int i; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + list_del(&res->list); + /* release the vlan as many times as the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_vlan(dev, res->port, res->vlan); + mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); + kfree(res); + } +} + +static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err; + u16 vlan; + int vlan_index; + int port; + + port = !in_port ? get_param_l(out_param) : in_port; + + if (!port || op != RES_OP_RESERVE_AND_MAP) + return -EINVAL; + + /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ + if (!in_port && port > 0 && port <= dev->caps.num_ports) { + slave_state[slave].old_vlan_api = true; + return 0; + } + + vlan = (u16) in_param; + + err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); + if (!err) { + set_param_l(out_param, (u32) vlan_index); + err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); + if (err) + __mlx4_unregister_vlan(dev, port, vlan); + } + return err; +} + static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { @@ -1484,15 +1886,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (op != RES_OP_RESERVE) return -EINVAL; - err = __mlx4_counter_alloc(dev, &index); + err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); if (err) return err; + err = __mlx4_counter_alloc(dev, &index); + if (err) { + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + return err; + } + err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); - if (err) + if (err) { __mlx4_counter_free(dev, index); - else + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + } else { set_param_l(out_param, index); + } return err; } @@ -1528,7 +1938,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, int err; int alop = vhcr->op_modifier; - switch (vhcr->in_modifier) { + switch (vhcr->in_modifier & 0xFF) { case RES_QP: err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); @@ -1556,12 +1966,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, case RES_MAC: err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); break; case RES_VLAN: err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); break; case RES_COUNTER: @@ -1597,6 +2009,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, err = rem_res_range(dev, slave, base, count, RES_QP, 0); if (err) break; + mlx4_release_resource(dev, slave, RES_QP, count, 0); __mlx4_qp_release_range(dev, base, count); break; case RES_OP_MAP_ICM: @@ -1634,8 +2047,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, base = get_param_l(&in_param); order = get_param_h(&in_param); err = rem_res_range(dev, slave, base, 1, RES_MTT, order); - if (!err) + if (!err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); __mlx4_free_mtt_range(dev, base, order); + } return err; } @@ -1660,6 +2075,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); if (err) break; + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); __mlx4_mpt_release(dev, index); break; case RES_OP_MAP_ICM: @@ -1694,6 +2110,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (err) break; + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); __mlx4_cq_free_icm(dev, cqn); break; @@ -1718,6 +2135,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (err) break; + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); __mlx4_srq_free_icm(dev, srqn); break; @@ -1730,14 +2148,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, } static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) + u64 in_param, u64 *out_param, int in_port) { int port; int err = 0; switch 
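The ALLOC_RES/FREE_RES wrappers above switch on vhcr->in_modifier & 0xFF and pass (vhcr->in_modifier >> 8) & 0xFF down as the port, matching the ((u32) port) << 8 | (u32) RES_MAC encoding the callers now build: one 32-bit modifier multiplexes the resource type (low byte) and the port (next byte). A small model of that packing; the type codes here are illustrative, not the driver's enum values:

#include <stdint.h>
#include <stdio.h>

enum { RES_MAC_T = 3, RES_VLAN_T = 4 };	/* illustrative type codes */

static uint32_t pack(uint8_t port, uint8_t res_type)
{
	return ((uint32_t)port << 8) | res_type;
}

int main(void)
{
	uint32_t in_modifier = pack(2, RES_VLAN_T);

	printf("type = %u, port = %u\n",
	       in_modifier & 0xFF,		/* selects the switch case */
	       (in_modifier >> 8) & 0xFF);	/* forwarded as in_port */
	return 0;
}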
(op) { case RES_OP_RESERVE_AND_MAP: - port = get_param_l(out_param); + port = !in_port ? get_param_l(out_param) : in_port; mac_del_from_slave(dev, slave, in_param, port); __mlx4_unregister_mac(dev, port, in_param); break; @@ -1751,9 +2169,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, } static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, - u64 in_param, u64 *out_param) + u64 in_param, u64 *out_param, int port) { - return 0; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err = 0; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + if (slave_state[slave].old_vlan_api) + return 0; + if (!port) + return -EINVAL; + vlan_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_vlan(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; } static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, @@ -1771,6 +2207,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); return err; } @@ -1803,7 +2240,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, int err = -EINVAL; int alop = vhcr->op_modifier; - switch (vhcr->in_modifier) { + switch (vhcr->in_modifier & 0xFF) { case RES_QP: err = qp_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param); @@ -1831,12 +2268,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, case RES_MAC: err = mac_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); break; case RES_VLAN: err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, - vhcr->in_param, &vhcr->out_param); + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); break; case RES_COUNTER: @@ -2136,6 +2575,12 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, return err; qp->local_qpn = local_qpn; qp->sched_queue = 0; + qp->param3 = 0; + qp->vlan_control = 0; + qp->fvl_rx = 0; + qp->pri_path_fl = 0; + qp->vlan_index = 0; + qp->feup = 0; qp->qpc_flags = be32_to_cpu(qpc->flags); err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); @@ -2862,6 +3307,12 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; u8 orig_sched_queue; + __be32 orig_param3 = qpc->param3; + u8 orig_vlan_control = qpc->pri_path.vlan_control; + u8 orig_fvl_rx = qpc->pri_path.fvl_rx; + u8 orig_pri_path_fl = qpc->pri_path.fl; + u8 orig_vlan_index = qpc->pri_path.vlan_index; + u8 orig_feup = qpc->pri_path.feup; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) @@ -2889,9 +3340,15 @@ out: * essentially the QOS value provided by the VF. 
This will be useful * if we allow dynamic changes from VST back to VGT */ - if (!err) + if (!err) { qp->sched_queue = orig_sched_queue; - + qp->param3 = orig_param3; + qp->vlan_control = orig_vlan_control; + qp->fvl_rx = orig_fvl_rx; + qp->pri_path_fl = orig_pri_path_fl; + qp->vlan_index = orig_vlan_index; + qp->feup = orig_feup; + } put_res(dev, slave, qpn, RES_QP); return err; } @@ -3498,6 +3955,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_QP]); list_del(&qp->com.list); spin_unlock_irq(mlx4_tlock(dev)); + if (!valid_reserved(dev, slave, qpn)) { + __mlx4_qp_release_range(dev, qpn, 1); + mlx4_release_resource(dev, slave, + RES_QP, 1, 0); + } kfree(qp); state = 0; break; @@ -3569,6 +4031,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_SRQ]); list_del(&srq->com.list); spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_SRQ, 1, 0); kfree(srq); state = 0; break; @@ -3635,6 +4099,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_CQ]); list_del(&cq->com.list); spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_CQ, 1, 0); kfree(cq); state = 0; break; @@ -3698,6 +4164,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_MPT]); list_del(&mpt->com.list); spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_MPT, 1, 0); kfree(mpt); state = 0; break; @@ -3767,6 +4235,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) &tracker->res_tree[RES_MTT]); list_del(&mtt->com.list); spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, RES_MTT, + 1 << mtt->order, 0); kfree(mtt); state = 0; break; @@ -3925,6 +4395,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) list_del(&counter->com.list); kfree(counter); __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); } } spin_unlock_irq(mlx4_tlock(dev)); @@ -3964,7 +4435,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) struct mlx4_priv *priv = mlx4_priv(dev); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); - /*VLAN*/ + rem_slave_vlans(dev, slave); rem_slave_macs(dev, slave); rem_slave_fs_rule(dev, slave); rem_slave_qps(dev, slave); @@ -3991,13 +4462,20 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) &tracker->slave_list[work->slave].res_list[RES_QP]; struct res_qp *qp; struct res_qp *tmp; - u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + u64 qp_path_mask_vlan_ctrl = + ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | - (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) | - (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED)); + + u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) | + (1ULL << MLX4_UPD_QP_PATH_MASK_CV) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) | (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); int err; @@ -4029,9 +4507,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; upd_context = mailbox->buf; - 
upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask); - upd_context->qp_context.pri_path.vlan_control = vlan_control; - upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { @@ -4049,10 +4525,35 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) spin_lock_irq(mlx4_tlock(dev)); continue; } - upd_context->qp_context.pri_path.sched_queue = - qp->sched_queue & 0xC7; - upd_context->qp_context.pri_path.sched_queue |= - ((work->qos & 0x7) << 3); + if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) + upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask); + else + upd_context->primary_addr_path_mask = + cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl); + if (work->vlan_id == MLX4_VGT) { + upd_context->qp_context.param3 = qp->param3; + upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; + upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; + upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; + upd_context->qp_context.pri_path.fl = qp->pri_path_fl; + upd_context->qp_context.pri_path.feup = qp->feup; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue; + } else { + upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + upd_context->qp_context.pri_path.fvl_rx = + qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.fl = + qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + upd_context->qp_context.pri_path.feup = + qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + } err = mlx4_cmd(dev, mailbox->dma, qp->local_qpn & 0xffffff, @@ -4081,7 +4582,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && NO_INDX != work->orig_vlan_ix) __mlx4_unregister_vlan(&work->priv->dev, work->port, - work->orig_vlan_ix); + work->orig_vlan_id); out: kfree(work); return; diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index 79fd269e2c5..8fdf2375377 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -34,6 +34,7 @@ #include <linux/init.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/srq.h> #include <linux/export.h> #include <linux/gfp.h> @@ -188,8 +189,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, } srq_context = mailbox->buf; - memset(srq_context, 0, sizeof *srq_context); - srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | srq->srqn); srq_context->logstride = srq->wqe_shift - 4; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5472cbd3402..8675d26a678 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -98,6 +98,7 @@ enum { static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, + void *uout, int uout_size, mlx5_cmd_cbk_t cbk, void *context, int page_queue) { @@ -110,6 +111,8 @@ static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 
ent->in = in; ent->out = out; + ent->uout = uout; + ent->uout_size = uout_size; ent->callback = cbk; ent->context = context; ent->cmd = cmd; @@ -180,28 +183,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block) return 0; } -static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) +static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, + int csum) { block->token = token; - block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); - block->sig = ~xor8_buf(block, sizeof(*block) - 1); + if (csum) { + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - + sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); + } } -static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) +static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) { struct mlx5_cmd_mailbox *next = msg->next; while (next) { - calc_block_sig(next->buf, token); + calc_block_sig(next->buf, token, csum); next = next->next; } } -static void set_signature(struct mlx5_cmd_work_ent *ent) +static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) { ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); - calc_chain_sig(ent->in, ent->token); - calc_chain_sig(ent->out, ent->token); + calc_chain_sig(ent->in, ent->token, csum); + calc_chain_sig(ent->out, ent->token, csum); } static void poll_timeout(struct mlx5_cmd_work_ent *ent) @@ -530,6 +537,7 @@ static void cmd_work_handler(struct work_struct *work) ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); @@ -539,8 +547,7 @@ static void cmd_work_handler(struct work_struct *work) lay->type = MLX5_PCI_CMD_XPORT; lay->token = ent->token; lay->status_own = CMD_OWNER_HW; - if (!cmd->checksum_disabled) - set_signature(ent); + set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ktime_get_ts(&ent->ts1); @@ -625,7 +632,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) * 2. 
page queue commands do not support asynchronous completion */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, - struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status) { struct mlx5_cmd *cmd = &dev->cmd; @@ -639,7 +647,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (callback && page_queue) return -EINVAL; - ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, + page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); @@ -667,10 +676,10 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", @@ -773,8 +782,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); block = next->buf; - if (xor8_buf(block, sizeof(*block)) != 0xff) - return -EINVAL; memcpy(to, block->data, copy); to += copy; @@ -825,7 +832,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, int n; int i; - msg = kzalloc(sizeof(*msg), GFP_KERNEL); + msg = kzalloc(sizeof(*msg), flags); if (!msg) return ERR_PTR(-ENOMEM); @@ -1108,6 +1115,19 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) up(&cmd->sem); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + unsigned long flags; + + if (msg->cache) { + spin_lock_irqsave(&msg->cache->lock, flags); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock_irqrestore(&msg->cache->lock, flags); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1116,6 +1136,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) void *context; int err; int i; + ktime_t t1, t2, delta; + s64 ds; + struct mlx5_cmd_stats *stats; + unsigned long flags; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { @@ -1140,9 +1164,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) } free_ent(cmd, ent->idx); if (ent->callback) { + t1 = timespec_to_ktime(ent->ts1); + t2 = timespec_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + if (ent->op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[ent->op]; + spin_lock_irqsave(&stats->lock, flags); + stats->sum += ds; + ++stats->n; + spin_unlock_irqrestore(&stats->lock, flags); + } + callback = ent->callback; context = ent->context; err = ent->ret; + if (!err) + err = mlx5_copy_from_msg(ent->uout, + ent->out, + ent->uout_size); + + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); callback(err, context); } else { @@ -1159,7 +1203,8 @@ static int status_to_err(u8 status) return status ? 
-1 : 0; /* TBD more meaningful codes */ } -static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, + gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); struct mlx5_cmd *cmd = &dev->cmd; @@ -1171,7 +1216,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) ent = &cmd->cache.med; if (ent) { - spin_lock(&ent->lock); + spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { msg = list_entry(ent->head.next, typeof(*msg), list); /* For cached lists, we must explicitly state what is @@ -1180,43 +1225,34 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) msg->len = in_size; list_del(&msg->list); } - spin_unlock(&ent->lock); + spin_unlock_irq(&ent->lock); } if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size); + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); return msg; } -static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) -{ - if (msg->cache) { - spin_lock(&msg->cache->lock); - list_add_tail(&msg->list, &msg->cache->head); - spin_unlock(&msg->cache->lock); - } else { - mlx5_free_cmd_msg(dev, msg); - } -} - static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; } -int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, - int out_size) +static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size, mlx5_cmd_cbk_t callback, void *context) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; int pages_queue; + gfp_t gfp; int err; u8 status = 0; pages_queue = is_manage_pages(in); + gfp = callback ? GFP_ATOMIC : GFP_KERNEL; - inb = alloc_msg(dev, in_size); + inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); return err; @@ -1228,13 +1264,14 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, goto out_in; } - outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size); + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } - err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status); + err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, + pages_queue, &status); if (err) goto out_out; @@ -1247,14 +1284,30 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_copy_from_msg(out, outb, out_size); out_out: - mlx5_free_cmd_msg(dev, outb); + if (!callback) + mlx5_free_cmd_msg(dev, outb); out_in: - free_msg(dev, inb); + if (!callback) + free_msg(dev, inb); return err; } + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); +} EXPORT_SYMBOL(mlx5_cmd_exec); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context) +{ + return cmd_exec(dev, in, in_size, out, out_size, callback, context); +} +EXPORT_SYMBOL(mlx5_cmd_exec_cb); + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; @@ -1361,6 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_map; } + cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; @@ -1510,7 +1564,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return 
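cmd_exec() above unifies the synchronous and asynchronous mlx5 command paths: a NULL callback means allocate messages with GFP_KERNEL, wait, copy the result out and free inline, while a non-NULL callback switches the allocations to GFP_ATOMIC and leaves result copy-out and message cleanup to mlx5_cmd_comp_handler(). A userspace model of just that ownership split; every name in it is illustrative:

#include <stdio.h>
#include <stdlib.h>

typedef void (*cmd_cbk_t)(int err, void *context);

/* one pending async command, standing in for the driver's command slots */
static struct {
	int *out_buf;		/* driver-owned result buffer */
	int *uout;		/* caller's result location */
	cmd_cbk_t callback;
	void *context;
} pending;

static int cmd_exec(int opcode, int *uout, cmd_cbk_t callback, void *context)
{
	/* callback != NULL implies atomic context in the driver (GFP_ATOMIC) */
	int *buf = malloc(sizeof(*buf));

	if (!buf)
		return -1;
	*buf = opcode * 2;		/* stand-in for firmware execution */
	if (callback) {
		/* async: completion handler will copy out, free, then notify */
		pending.out_buf = buf;
		pending.uout = uout;
		pending.callback = callback;
		pending.context = context;
		return 0;
	}
	/* sync: copy out and clean up inline, as the !callback branches do */
	*uout = *buf;
	free(buf);
	return 0;
}

static void comp_handler(void)		/* models mlx5_cmd_comp_handler() */
{
	*pending.uout = *pending.out_buf;
	free(pending.out_buf);
	pending.callback(0, pending.context);
}

static void done(int err, void *context)
{
	printf("async done, err=%d, tag=%s\n", err, (const char *)context);
}

int main(void)
{
	int out;

	cmd_exec(21, &out, NULL, NULL);
	printf("sync result %d\n", out);
	cmd_exec(21, &out, done, "mkey");
	comp_handler();			/* in the driver: the EQ interrupt path */
	printf("async result %d\n", out);
	return 0;
}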
-EIO; case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; - case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; + case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_IX_ERR: return -EINVAL; case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 9c7194b26ee..80f6d127257 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -154,10 +154,10 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, return 0; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); if (stats->n) field = div64_u64(stats->sum, stats->n); - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); if (ret > 0) { if (copy_to_user(buf, tbuf, ret)) @@ -175,10 +175,10 @@ static ssize_t average_write(struct file *filp, const char __user *buf, struct mlx5_cmd_stats *stats; stats = filp->private_data; - spin_lock(&stats->lock); + spin_lock_irq(&stats->lock); stats->sum = 0; stats->n = 0; - spin_unlock(&stats->lock); + spin_unlock_irq(&stats->lock); *pos += count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 443cc4d7b02..64a61b286b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); in->ctx.intr = vecidx; - in->ctx.log_page_size = PAGE_SHIFT - 12; + in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; in->events_mask = cpu_to_be64(mask); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); @@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, goto err_in; } + snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); eq->eqn = out.eq_number; err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, - name, eq); + eq->name, eq); if (err) goto err_eq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b47739b0b5f..40a9f5ed814 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -159,15 +159,43 @@ struct mlx5_reg_host_endianess { u8 rsvd[15]; }; + +#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) + +enum { + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + CAP_MASK(MLX5_CAP_OFF_DCT, 1), +}; + +/* selectively copy writable fields clearing any reserved area + */ +static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from) +{ + u64 v64; + + to->log_max_qp = from->log_max_qp & 0x1f; + to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; + to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; + to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; + to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; + to->log_max_atomic_size_qp = from->log_max_atomic_size_qp; + to->log_max_atomic_size_dc = from->log_max_atomic_size_dc; + v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK; + to->flags = cpu_to_be64(v64); +} + +enum 
{ + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + static int handle_hca_cap(struct mlx5_core_dev *dev) { struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; struct mlx5_cmd_set_hca_cap_mbox_out set_out; - struct mlx5_profile *prof = dev->profile; u64 flags; - int csum = 1; int err; memset(&query_ctx, 0, sizeof(query_ctx));
@@ -182,7 +210,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) } query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); - query_ctx.hdr.opmod = cpu_to_be16(0x1); + query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR); err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), query_out, sizeof(*query_out)); if (err)
@@ -194,23 +222,16 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) goto query_ex; } - memcpy(&set_ctx->hca_cap, &query_out->hca_cap, - sizeof(set_ctx->hca_cap)); - - if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) { - csum = !!prof->cmdif_csum; - flags = be64_to_cpu(set_ctx->hca_cap.flags); - if (csum) - flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM; - else - flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; - - set_ctx->hca_cap.flags = cpu_to_be64(flags); - } + copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; + flags = be64_to_cpu(query_out->hca_cap.flags); + /* disable checksum */ + flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + set_ctx->hca_cap.flags = cpu_to_be64(flags); memset(&set_out, 0, sizeof(set_out)); set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +246,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) if (err) goto query_ex; - if (!csum) - dev->cmd.checksum_disabled = 1; - query_ex: kfree(query_out); kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 5b44e2e46da..35e514dc7b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -37,31 +37,41 @@ #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen) + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out) { - struct mlx5_create_mkey_mbox_out out; + struct mlx5_create_mkey_mbox_out lout; int err; u8 key; - memset(&out, 0, sizeof(out)); - spin_lock(&dev->priv.mkey_lock); + memset(&lout, 0, sizeof(lout)); + spin_lock_irq(&dev->priv.mkey_lock); key = dev->priv.mkey_key++; - spin_unlock(&dev->priv.mkey_lock); + spin_unlock_irq(&dev->priv.mkey_lock); in->seg.qpn_mkey7_0 |= cpu_to_be32(key); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (callback) { + err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), + callback, context); + return err; + } else { + err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); + } + if (err) { mlx5_core_dbg(dev, "cmd exec failed %d\n", err); return err; } - if (out.hdr.status) { - mlx5_core_dbg(dev, "status %d\n", out.hdr.status); - return mlx5_cmd_status_to_err(&out.hdr); + if (lout.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); + return mlx5_cmd_status_to_err(&lout.hdr); } - mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
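The mkey composition at the end of this function is worth a closer look: the firmware returns an index, and the driver folds in the 8-bit rolling key taken from mkey_key above so that a recycled index is unlikely to alias a stale memory key. A stand-alone sketch of that composition; mlx5_idx_to_mkey() is shown here as the 8-bit shift this tree uses, and that exact layout should be treated as an assumption:

#include <stdint.h>
#include <assert.h>

/* sketch: 24-bit mkey index in the upper bits, 8-bit variant key below */
static uint32_t idx_to_mkey(uint32_t idx)
{
	return idx << 8;	/* mirrors mlx5_idx_to_mkey() in this tree */
}

static uint32_t compose_mkey(uint32_t fw_mkey, uint8_t key)
{
	return idx_to_mkey(fw_mkey & 0xffffff) | key;
}

int main(void)
{
	/* a recycled index with a new key no longer matches the old mkey */
	assert(compose_mkey(0x1234, 1) != compose_mkey(0x1234, 2));
	return 0;
}

- 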
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", + be32_to_cpu(lout.mkey), key, mr->key); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 3a2408d4482..37b6ad1f9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -57,10 +57,13 @@ struct mlx5_pages_req { }; struct fw_page { - struct rb_node rb_node; - u64 addr; - struct page *page; - u16 func_id; + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; + unsigned long bitmask; + struct list_head list; + unsigned free_count; }; struct mlx5_query_pages_inbox { @@ -90,6 +93,15 @@ struct mlx5_manage_pages_outbox { __be64 pas[0]; }; +enum { + MAX_RECLAIM_TIME_MSECS = 5000, +}; + +enum { + MLX5_MAX_RECLAIM_TIME_MILI = 5000, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096, +}; + static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { struct rb_root *root = &dev->priv.page_root; @@ -97,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u struct rb_node *parent = NULL; struct fw_page *nfp; struct fw_page *tfp; + int i; while (*new) { parent = *new; @@ -109,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u return -EEXIST; } - nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) return -ENOMEM; nfp->addr = addr; nfp->page = page; nfp->func_id = func_id; + nfp->free_count = MLX5_NUM_4K_IN_PAGE; + for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) + set_bit(i, &nfp->bitmask); rb_link_node(&nfp->rb_node, parent, new); rb_insert_color(&nfp->rb_node, root); + list_add(&nfp->list, &dev->priv.free_list); return 0; } -static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) { struct rb_root *root = &dev->priv.page_root; struct rb_node *tmp = root->rb_node; - struct page *result = NULL; + struct fw_page *result = NULL; struct fw_page *tfp; while (tmp) { @@ -137,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } else if (tfp->addr > addr) { tmp = tmp->rb_right; } else { - rb_erase(&tfp->rb_node, root); - result = tfp->page; - kfree(tfp); + result = tfp; break; } } @@ -172,12 +187,98 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, return err; } +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) +{ + struct fw_page *fp; + unsigned n; + + if (list_empty(&dev->priv.free_list)) { + return -ENOMEM; + mlx5_core_warn(dev, "\n"); + } + + fp = list_entry(dev->priv.free_list.next, struct fw_page, list); + n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); + if (n >= MLX5_NUM_4K_IN_PAGE) { + mlx5_core_warn(dev, "alloc 4k bug\n"); + return -ENOENT; + } + clear_bit(n, &fp->bitmask); + fp->free_count--; + if (!fp->free_count) + list_del(&fp->list); + + *addr = fp->addr + n * 4096; + + return 0; +} + +static void free_4k(struct mlx5_core_dev *dev, u64 addr) +{ + struct fw_page *fwp; + int n; + + fwp = find_fw_page(dev, addr & PAGE_MASK); + if (!fwp) { + mlx5_core_warn(dev, "page not found\n"); + return; + } + + n = (addr & ~PAGE_MASK) % 4096; + fwp->free_count++; + set_bit(n, &fwp->bitmask); + if (fwp->free_count == 
MLX5_NUM_4K_IN_PAGE) { + rb_erase(&fwp->rb_node, &dev->priv.page_root); + if (fwp->free_count != 1) + list_del(&fwp->list); + dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(fwp->page); + kfree(fwp); + } else if (fwp->free_count == 1) { + list_add(&fwp->list, &dev->priv.free_list); + } +} + +static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +{ + struct page *page; + u64 addr; + int err; + + page = alloc_page(GFP_HIGHUSER); + if (!page) { + mlx5_core_warn(dev, "failed to allocate page\n"); + return -ENOMEM; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + goto out_mapping; + } + + return 0; + +out_mapping: + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +out_alloc: + __free_page(page); + + return err; +} static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { struct mlx5_manage_pages_inbox *in; struct mlx5_manage_pages_outbox out; - struct page *page; + struct mlx5_manage_pages_inbox *nin; int inlen; u64 addr; int err;
@@ -192,27 +293,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) { - err = -ENOMEM; - mlx5_core_warn(dev, "failed to allocate page\n"); - goto out_alloc; - } - addr = dma_map_page(&dev->pdev->dev, page, 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->pdev->dev, addr)) { - mlx5_core_warn(dev, "failed dma mapping page\n"); - __free_page(page); - err = -ENOMEM; - goto out_alloc; - } - err = insert_page(dev, addr, page, func_id); +retry: + err = alloc_4k(dev, &addr); if (err) { - mlx5_core_err(dev, "failed to track allocated page\n"); - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - err = -ENOMEM; - goto out_alloc; + if (err == -ENOMEM) + err = alloc_system_page(dev, func_id); + if (err) + goto out_4k; + + goto retry; } in->pas[i] = cpu_to_be64(addr); }
@@ -222,7 +311,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->func_id = cpu_to_be16(func_id); in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - mlx5_core_dbg(dev, "err %d\n", err); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_alloc;
@@ -243,25 +331,22 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, out_alloc: if (notify_fail) { - memset(in, 0, inlen); - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); - if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) - mlx5_core_warn(dev, "\n"); - } - for (i--; i >= 0; i--) { - addr = be64_to_cpu(in->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", - addr); - continue; + nin = kzalloc(sizeof(*nin), GFP_KERNEL); + if (!nin) { + mlx5_core_warn(dev, "allocation failed\n"); + goto out_4k; } - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); + memset(&out, 0, sizeof(out));
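give_pages() now fills the command's address array through a two-level loop: take a 4K chunk from the free list if one exists, and only on exhaustion map one more host page and retry. A compilable sketch of that control flow, with pool_alloc_4k()/pool_grow() as stand-ins for alloc_4k()/alloc_system_page():

#include <errno.h>
#include <stdint.h>

/* sketch only: trivial stand-ins for alloc_4k() / alloc_system_page() */
static int chunks_left;

static int pool_alloc_4k(uint64_t *addr)
{
	if (!chunks_left)
		return -ENOMEM;
	*addr = 0x100000 + (uint64_t)--chunks_left * 4096;
	return 0;
}

static int pool_grow(void)
{
	chunks_left += 16;	/* one 64 KiB host page worth of 4 KiB chunks */
	return 0;
}

static int fill_addresses(uint64_t *pas, int npages)
{
	for (int i = 0; i < npages; i++) {
		uint64_t addr;
		int err;
retry:
		err = pool_alloc_4k(&addr);
		if (err) {
			if (err == -ENOMEM)
				err = pool_grow();	/* map one more host page */
			if (err)
				return err;	/* caller unwinds pas[0..i-1] */
			goto retry;
		}
		pas[i] = addr;
	}
	return 0;
}

int main(void)
{
	uint64_t pas[32];

	return fill_addresses(pas, 32);
}

If the firmware command itself fails, the driver still owes the firmware an answer; the MLX5_PAGES_CANT_GIVE notification is assembled in a freshly allocated inbox so the addresses collected above stay intact for the unwind:

+ nin->hdr.opcode = 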
cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) + mlx5_core_warn(dev, "page notify failed\n"); + kfree(nin); } +out_4k: + for (i--; i >= 0; i--) + free_4k(dev, be64_to_cpu(in->pas[i])); out_free: mlx5_vfree(in); return err; @@ -272,13 +357,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, { struct mlx5_manage_pages_inbox in; struct mlx5_manage_pages_outbox *out; - struct page *page; int num_claimed; int outlen; u64 addr; int err; int i; + if (nclaimed) + *nclaimed = 0; + memset(&in, 0, sizeof(in)); outlen = sizeof(*out) + npages * sizeof(out->pas[0]); out = mlx5_vzalloc(outlen); @@ -308,13 +395,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); - page = remove_page(dev, addr); - if (!page) { - mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); - } else { - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(page); - } + free_4k(dev, addr); } out_free: @@ -374,34 +455,44 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) return give_pages(dev, func_id, npages, 0); } +enum { + MLX5_BLKS_FOR_RECLAIM_PAGES = 12 +}; + static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; - ret = (sizeof(lay->in) + sizeof(block->data) - - sizeof(struct mlx5_manage_pages_outbox)) / 8; + ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / + FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) { - unsigned long end = jiffies + msecs_to_jiffies(5000); + unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); struct fw_page *fwp; struct rb_node *p; + int nclaimed = 0; int err; do { p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct fw_page, rb_node); - err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); + err = reclaim_pages(dev, fwp->func_id, + optimal_reclaimed_pages(), + &nclaimed); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); return err; } + if (nclaimed) + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); } if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); @@ -415,6 +506,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; + INIT_LIST_HEAD(&dev->priv.free_list); } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1e..822616e3c37 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - sg, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!ctl->adesc) goto out; diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 075f4e21d33..c83d16dc7cd 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data) w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); ks_wrreg16(ks, KS_MARL, w); - memcpy(ks->mac_addr, data, 6); + memcpy(ks->mac_addr, data, ETH_ALEN); if (ks->enabled) ks_start_rx(ks); @@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev) } netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr); - memcpy(netdev->dev_addr, ks->mac_addr, 6); + memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN); ks_set_mac(ks, netdev->dev_addr); diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 8ebc352bcbe..ddd252a3da9 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev) struct platform_info *info = pci_get_drvdata(pdev); struct dev_info *hw_priv = &info->dev_info; - pci_set_drvdata(pdev, NULL); - release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); for (i = 0; i < hw_priv->hw.dev_count; i++) { @@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state) static char pcidev_name[] = "ksz884xp"; -static struct pci_device_id pcidev_table[] = { +static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = { { PCI_VENDOR_ID_MICREL_KS, 0x8841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_MICREL_KS, 0x8842, diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index bd1a2d2bc2a..cbd01337925 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -26,7 +26,6 @@ #include <linux/of_irq.h> #include <linux/crc32.h> #include <linux/crc32c.h> -#include <linux/dma-mapping.h> #include "moxart_ether.h" @@ -448,7 +447,8 @@ static int moxart_mac_probe(struct platform_device *pdev) irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { netdev_err(ndev, "irq_of_parse_and_map failed\n"); - return -EINVAL; + ret = -EINVAL; + goto irq_map_fail; } priv = netdev_priv(ndev); @@ -472,24 +472,32 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_desc_base = 
dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); - if (priv->tx_desc_base == NULL) + if (priv->tx_desc_base == NULL) { + ret = -ENOMEM; goto init_fail; + } priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); - if (priv->rx_desc_base == NULL) + if (priv->rx_desc_base == NULL) { + ret = -ENOMEM; goto init_fail; + } priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, GFP_ATOMIC); - if (!priv->tx_buf_base) + if (!priv->tx_buf_base) { + ret = -ENOMEM; goto init_fail; + } priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, GFP_ATOMIC); - if (!priv->rx_buf_base) + if (!priv->rx_buf_base) { + ret = -ENOMEM; goto init_fail; + } platform_set_drvdata(pdev, ndev); @@ -522,7 +530,8 @@ static int moxart_mac_probe(struct platform_device *pdev) init_fail: netdev_err(ndev, "init failed\n"); moxart_mac_free_memory(ndev); - +irq_map_fail: + free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 149355b52ad..68026f7e8ba 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) { - int rc = true; + bool rc = true; spin_lock(&ss->lock); if ((ss->state & SLICE_LOCKED)) { WARN_ON((ss->state & SLICE_STATE_NAPI)); @@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) { - int rc = true; + bool rc = true; spin_lock_bh(&ss->lock); if ((ss->state & SLICE_LOCKED)) { ss->state |= SLICE_STATE_POLL_YIELD; @@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) /* Walk the multicast list, and add each address */ netdev_for_each_mc_addr(ha, dev) { - memcpy(data, &ha->addr, 6); + memcpy(data, &ha->addr, ETH_ALEN); cmd.data0 = ntohl(data[0]); cmd.data1 = ntohl(data[1]); err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, @@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) } /* change the dev structure */ - memcpy(dev->dev_addr, sa->sa_data, 6); + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); return 0; } @@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev) set_fw_name(mgp, NULL, false); free_netdev(netdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 7a5e295588b..64ec2a437f4 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); err_pci_request_regions: free_netdev(dev); @@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev) pci_release_regions (pdev); iounmap(ioaddr); free_netdev (dev); - pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 4da172ac559..7007d212f3e 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -264,6 +264,7 @@ int 
xtsonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; + platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); netdev_boot_setup_check(dev); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 51b00941302..9eeddbd0b2c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8185,7 +8185,6 @@ mem_alloc_failed: free_shared_mem(sp); pci_disable_device(pdev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); return ret; @@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev) iounmap(sp->bar0); iounmap(sp->bar1); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5a20eaf903d..f9876ea8c8b 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2072,6 +2072,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev) vdev->config.tx_steering_type; vpath->fifo.ndev = vdev->ndev; vpath->fifo.pdev = vdev->pdev; + + u64_stats_init(&vpath->fifo.stats.syncp); + u64_stats_init(&vpath->ring.stats.syncp); + if (vdev->config.tx_steering_type) vpath->fifo.txq = netdev_get_tx_queue(vdev->ndev, i); @@ -4739,7 +4743,6 @@ _exit6: _exit5: vxge_device_unregister(hldev); _exit4: - pci_set_drvdata(pdev, NULL); vxge_hw_device_terminate(hldev); pci_disable_sriov(pdev); _exit3: @@ -4782,7 +4785,6 @@ static void vxge_remove(struct pci_dev *pdev) vxge_free_mac_add_list(&vdev->vpaths[i]); vxge_device_unregister(hldev); - pci_set_drvdata(pdev, NULL); /* Do not call pci_disable_sriov here, as it will break child devices */ vxge_hw_device_terminate(hldev); iounmap(vdev->bar0); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 098b96dad66..1e8b9514718 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - int result; - memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); + int result, count; + + count = nv_get_sset_count(dev, ETH_SS_TEST); + memset(buffer, 0, count * sizeof(u64)); if (!nv_link_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; @@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 return; } - if (!nv_loopback_test(dev)) { + if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; buffer[3] = 1; } @@ -5619,6 +5621,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) spin_lock_init(&np->lock); spin_lock_init(&np->hwstats_lock); SET_NETDEV_DEV(dev, &pci_dev->dev); + u64_stats_init(&np->swstats_rx_syncp); + u64_stats_init(&np->swstats_tx_syncp); init_timer(&np->oom_kick); np->oom_kick.data = (unsigned long) dev; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index a061b93efe6..ba3ca18611f 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } if (pldat->dma_buff_base_v == 0) { - pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF; - pldat->pdev->dev.dma_mask = 
&pldat->pdev->dev.coherent_dma_mask; + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto err_out_free_irq; + pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); /* Allocate a chunk of memory for the DMA ethernet buffers diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 622aa75904c..7dc3e9b06d7 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1545,15 +1545,16 @@ static int octeon_mgmt_probe(struct platform_device *pdev) mac = of_get_mac_address(pdev->dev.of_node); - if (mac && is_valid_ether_addr(mac)) + if (mac) memcpy(netdev->dev_addr, mac, ETH_ALEN); else eth_hw_addr_random(netdev); p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (result) + goto err; netif_carrier_off(netdev); result = register_netdev(netdev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 6797b107587..2a9003071d5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -653,38 +653,38 @@ struct pch_gbe_adapter { extern const char pch_driver_version[]; /* pch_gbe_main.c */ -extern int pch_gbe_up(struct pch_gbe_adapter *adapter); -extern void pch_gbe_down(struct pch_gbe_adapter *adapter); -extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter); -extern void pch_gbe_reset(struct pch_gbe_adapter *adapter); -extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, - struct pch_gbe_tx_ring *txdr); -extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, - struct pch_gbe_rx_ring *rxdr); -extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, - struct pch_gbe_tx_ring *tx_ring); -extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, - struct pch_gbe_rx_ring *rx_ring); -extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); -extern u32 pch_ch_control_read(struct pci_dev *pdev); -extern void pch_ch_control_write(struct pci_dev *pdev, u32 val); -extern u32 pch_ch_event_read(struct pci_dev *pdev); -extern void pch_ch_event_write(struct pci_dev *pdev, u32 val); -extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev); -extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev); -extern u64 pch_rx_snap_read(struct pci_dev *pdev); -extern u64 pch_tx_snap_read(struct pci_dev *pdev); -extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev); +int pch_gbe_up(struct pch_gbe_adapter *adapter); +void pch_gbe_down(struct pch_gbe_adapter *adapter); +void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter); +void pch_gbe_reset(struct pch_gbe_adapter *adapter); +int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *txdr); +int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rxdr); +void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring); +void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring); +void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); +u32 pch_ch_control_read(struct pci_dev *pdev); +void pch_ch_control_write(struct pci_dev *pdev, u32 val); +u32 pch_ch_event_read(struct pci_dev *pdev); +void 
pch_ch_event_write(struct pci_dev *pdev, u32 val); +u32 pch_src_uuid_lo_read(struct pci_dev *pdev); +u32 pch_src_uuid_hi_read(struct pci_dev *pdev); +u64 pch_rx_snap_read(struct pci_dev *pdev); +u64 pch_tx_snap_read(struct pci_dev *pdev); +int pch_set_station_address(u8 *addr, struct pci_dev *pdev); /* pch_gbe_param.c */ -extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter); +void pch_gbe_check_options(struct pch_gbe_adapter *adapter); /* pch_gbe_ethtool.c */ -extern void pch_gbe_set_ethtool_ops(struct net_device *netdev); +void pch_gbe_set_ethtool_ops(struct net_device *netdev); /* pch_gbe_mac.c */ -extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); -extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); -extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, - u32 addr, u32 dir, u32 reg, u16 data); +s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); +s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); +u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, + u16 data); #endif /* _PCH_GBE_H_ */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c2c81..27ffe0ebf0a 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - adapter->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - adapter->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. 
*/ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index cac33e5f9bc..b6bdeb3c197 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev) iounmap(hmp->base); free_netdev(dev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index d28593b1fc3..07a890eb72a 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -513,7 +513,6 @@ err_out_unmap_rx: err_out_unmap_tx: pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_cleardev: - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions(pdev); @@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev) pci_release_regions (pdev); free_netdev (dev); - pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 5b65356e756..dbaa49e58b0 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev) pasemi_dma_free_chan(&mac->tx->chan); pasemi_dma_free_chan(&mac->rx->chan); - pci_set_drvdata(pdev, NULL); free_netdev(netdev); } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 32675e16021..9adcdbb4947 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 81 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.81" +#define _NETXEN_NIC_LINUX_SUBVERSION 82 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.82" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) @@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); -extern void netxen_change_ringparam(struct netxen_adapter *adapter); -extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, - int *valp); +void netxen_change_ringparam(struct netxen_adapter *adapter); +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); extern const struct ethtool_ops netxen_nic_ethtool_ops; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h index 32c790659f9..0c64c82b9ac 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -958,6 +958,7 @@ enum { #define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) #define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) #define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) +#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178)) /* MiniDIMM related macros */ #define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258)) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 8375cbde996..67efe754367 100644 --- 
a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) mac_req = (nx_mac_req_t *)&req.words[0]; mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); + memcpy(mac_req->mac_addr, addr, ETH_ALEN); return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); }
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index cbd75f97ffb..3bec8cfebf9 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter, return 0; } +#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01) +#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00) + +static void netxen_read_ula_info(struct netxen_adapter *adapter) +{ + u32 temp; + + /* Print ULA info only once for an adapter */ + if (adapter->portnum != 0) + return; + + temp = NXRD32(adapter, NETXEN_ULA_KEY); + switch (temp) { + case NETXEN_ULA_ADAPTER_KEY: + dev_info(&adapter->pdev->dev, "ULA adapter\n"); + break; + case NETXEN_NON_ULA_ADAPTER_KEY: + dev_info(&adapter->pdev->dev, "non ULA adapter\n"); + break; + default: + break; + } + + return; +} + #ifdef CONFIG_PCIEAER static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) {
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_msi; } + netxen_read_ula_info(adapter); + err = netxen_setup_netdev(adapter, netdev); if (err) goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return err; }
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(netdev); }
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 91a8fcd6c24..0758b943535 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3916,7 +3916,6 @@ err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return err; }
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev) iounmap(qdev->mem_map_registers); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(ndev); }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 81bf83604c4..631ea0ac1cd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 50 -#define QLCNIC_LINUX_VERSIONID "5.3.50" +#define _QLCNIC_LINUX_SUBVERSION 52 +#define QLCNIC_LINUX_VERSIONID "5.3.52" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,8 +98,22 @@ #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + MGMT_CMD_DESC_RESV) #define QLCNIC_MAX_TX_TIMEOUTS 2 -#define QLCNIC_MAX_TX_RINGS 8 -#define QLCNIC_MAX_SDS_RINGS 8
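qlcnic now separates the ring maxima the adapter supports from the counts the driver actually enables, and, as the comment that follows notes, the INT-x/MSI/SRIOV interrupt modes are pinned to a single Tx ring. A toy sketch of that clamp; the enum and names are invented for the sketch:

#include <stdio.h>

/* sketch: Tx ring count clamped by interrupt mode; names illustrative */
enum intr_mode { INTR_MSIX, INTR_MSI, INTR_LEGACY };

static unsigned int tx_rings_for_mode(enum intr_mode mode, unsigned int requested)
{
	/* only MSI-X provides enough vectors to steer per-ring Tx completions */
	return mode == INTR_MSIX ? requested : 1;
}

int main(void)
{
	printf("%u\n", tx_rings_for_mode(INTR_MSI, 4));	/* prints 1 */
	return 0;
}

+ +/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. 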
*/ +#define QLCNIC_SINGLE_RING 1 +#define QLCNIC_DEF_SDS_RINGS 4 +#define QLCNIC_DEF_TX_RINGS 4 +#define QLCNIC_MAX_VNIC_TX_RINGS 4 +#define QLCNIC_MAX_VNIC_SDS_RINGS 4 + +enum qlcnic_queue_type { + QLCNIC_TX_QUEUE = 1, + QLCNIC_RX_QUEUE, +}; + +/* Operational mode for driver */ +#define QLCNIC_VNIC_MODE 0xFF +#define QLCNIC_DEFAULT_MODE 0x0 /* * Following are the states of the Phantom. Phantom will set them and @@ -533,6 +547,14 @@ struct qlcnic_host_sds_ring { char name[IFNAMSIZ + 12]; } ____cacheline_internodealigned_in_smp; +struct qlcnic_tx_queue_stats { + u64 xmit_on; + u64 xmit_off; + u64 xmit_called; + u64 xmit_finished; + u64 tx_bytes; +}; + struct qlcnic_host_tx_ring { int irq; void __iomem *crb_intr_mask; @@ -544,10 +566,7 @@ struct qlcnic_host_tx_ring { u32 sw_consumer; u32 num_desc; - u64 xmit_on; - u64 xmit_off; - u64 xmit_called; - u64 xmit_finished; + struct qlcnic_tx_queue_stats tx_stats; void __iomem *crb_cmd_producer; struct cmd_desc_type0 *desc_head; @@ -940,8 +959,6 @@ struct qlcnic_ipaddr { #define QLCNIC_BEACON_EANBLE 0xC #define QLCNIC_BEACON_DISABLE 0xD -#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 -#define QLCNIC_DEF_NUM_TX_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 #define QLCNIC_MSIX_TBL_PGSIZE 4096 @@ -961,8 +978,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_SRIOV_CAPABLE 11 #define __QLCNIC_MBX_POLL_ENABLE 12 #define __QLCNIC_DIAG_MODE 13 -#define __QLCNIC_DCB_STATE 14 -#define __QLCNIC_DCB_IN_AEN 15 +#define __QLCNIC_MAINTENANCE_MODE 16 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -1013,7 +1029,6 @@ struct qlcnic_adapter { unsigned long state; u32 flags; - int max_drv_tx_rings; u16 num_txd; u16 num_rxd; u16 num_jumbo_rxd; @@ -1021,7 +1036,13 @@ struct qlcnic_adapter { u16 max_jumbo_rxd; u8 max_rds_rings; - u8 max_sds_rings; + + u8 max_sds_rings; /* max sds rings supported by adapter */ + u8 max_tx_rings; /* max tx rings supported by adapter */ + + u8 drv_tx_rings; /* max tx rings supported by driver */ + u8 drv_sds_rings; /* max sds rings supported by driver */ + u8 rx_csum; u8 portnum; @@ -1199,6 +1220,7 @@ struct qlcnic_npar_info { u8 promisc_mode; u8 offload_flags; u8 pci_func; + u8 mac[ETH_ALEN]; }; struct qlcnic_eswitch { @@ -1543,12 +1565,13 @@ int qlcnic_loopback_test(struct net_device *, u8); /* Functions from qlcnic_main.c */ int qlcnic_reset_context(struct qlcnic_adapter *); -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); -int qlcnic_diag_alloc_res(struct net_device *netdev, int test); -netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int); -int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); -int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq); +void qlcnic_diag_free_res(struct net_device *netdev, int); +int qlcnic_diag_alloc_res(struct net_device *netdev, int); +netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *); +void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8); +void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8); +int qlcnic_setup_rings(struct qlcnic_adapter *, u8, u8); +int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); @@ -1641,19 +1664,18 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) static inline int 
qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, struct net_device *netdev) { - int err, tx_q; - - tx_q = adapter->max_drv_tx_rings; + int err; - netdev->num_tx_queues = tx_q; - netdev->real_num_tx_queues = tx_q; + netdev->num_tx_queues = adapter->drv_tx_rings; + netdev->real_num_tx_queues = adapter->drv_tx_rings; - err = netif_set_real_num_tx_queues(netdev, tx_q); + err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); if (err) dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n", - tx_q); + adapter->drv_tx_rings); else - dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q); + dev_info(&adapter->pdev->dev, "Set %d Tx queues\n", + adapter->drv_tx_rings); return err; }
@@ -1695,7 +1717,7 @@ struct qlcnic_hardware_ops { int (*write_reg) (struct qlcnic_adapter *, ulong, u32); void (*get_ocm_win) (struct qlcnic_hardware_context *); int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8); - int (*setup_intr) (struct qlcnic_adapter *, u8, int); + int (*setup_intr) (struct qlcnic_adapter *); int (*alloc_mbx_args)(struct qlcnic_cmd_args *, struct qlcnic_adapter *, u32); int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1766,10 +1788,9 @@ static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function); } -static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, - u8 num_intr, int txq) +static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter) { - return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq); + return adapter->ahw->hw_ops->setup_intr(adapter); } static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -2005,7 +2026,7 @@ static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter) static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter) { test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); - adapter->max_drv_tx_rings = 1; + adapter->drv_tx_rings = QLCNIC_SINGLE_RING; } /* When operating in a multi tx mode, driver needs to write 0x1
@@ -2115,98 +2136,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) return status; } - -static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->get_hw_capability) - return dcb->ops->get_hw_capability(adapter); - - return 0; -} - -static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->free) - dcb->ops->free(adapter); -} - -static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->attach) - return dcb->ops->attach(adapter); - - return 0; -} - -static inline int -qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->query_hw_capability) - return dcb->ops->query_hw_capability(adapter, buf); - - return 0; -} - -static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->get_info) - dcb->ops->get_info(adapter); -} - -static inline int -qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->query_cee_param) - return dcb->ops->query_cee_param(adapter, buf, type); - - return 0; -}
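Every one of these dropped wrappers follows the same idiom: an inline guard that turns a missing optional subsystem, or a missing op, into a quiet no-op, so call sites never test for DCB support themselves. The replacements live with the DCB code and take the qlcnic_dcb pointer directly; the shape of the idiom, sketched independently of the driver's real types:

#include <stdio.h>

/* sketch of the guarded-ops idiom behind the removed qlcnic_dcb_* wrappers;
 * the structs here are illustrative, not the driver's */
struct dcb_ops {
	int (*attach)(void *priv);
	void (*get_info)(void *priv);
};

struct dcb {
	void *priv;
	const struct dcb_ops *ops;
};

static int dcb_attach(struct dcb *dcb)
{
	if (dcb && dcb->ops->attach)
		return dcb->ops->attach(dcb->priv);

	return 0;	/* a device without DCB support is not an error */
}

static void dcb_get_info(struct dcb *dcb)
{
	if (dcb && dcb->ops->get_info)
		dcb->ops->get_info(dcb->priv);
}

int main(void)
{
	/* both calls are safe no-ops when DCB was never allocated */
	printf("attach: %d\n", dcb_attach(NULL));
	dcb_get_info(NULL);
	return 0;
}

- -static inline int 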
qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->get_cee_cfg) - return dcb->ops->get_cee_cfg(adapter); - - return 0; -} - -static inline void -qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->register_aen) - dcb->ops->register_aen(adapter, flag); -} - -static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter, - void *msg) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->handle_aen) - dcb->ops->handle_aen(adapter, msg); -} - -static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter) -{ - struct qlcnic_dcb *dcb = adapter->dcb; - - if (dcb && dcb->ops->init_dcbnl_ops) - dcb->ops->init_dcbnl_ops(adapter); -} #endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 3ca00e05f23..b1cb0ffb15c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -13,7 +13,6 @@ #include <linux/interrupt.h> #include <linux/aer.h> -#define QLCNIC_MAX_TX_QUEUES 1 #define RSS_HASHTYPE_IP_TCP 0x3 #define QLC_83XX_FW_MBX_CMD 0
@@ -268,20 +267,18 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, } } -int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq) +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) { int err, i, num_msix; struct qlcnic_hardware_context *ahw = adapter->ahw; - if (!num_intr) - num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS; - num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), - num_intr)); + num_msix = adapter->drv_sds_rings; + /* account for the AEN interrupt's MSI-X vector */ num_msix += 1; if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) - num_msix += adapter->max_drv_tx_rings; + num_msix += adapter->drv_tx_rings; err = qlcnic_enable_msix(adapter, num_msix); if (err == -ENOMEM)
@@ -325,7 +322,8 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter) { - writel(1, adapter->tgt_mask_reg); + if (adapter->tgt_mask_reg) + writel(1, adapter->tgt_mask_reg); } /* Enable MSI-x and INT-x interrupts */
@@ -498,8 +496,11 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) num_msix = 0; msleep(20); - synchronize_irq(adapter->msix_entries[num_msix].vector); - free_irq(adapter->msix_entries[num_msix].vector, adapter); + + if (adapter->msix_entries) { + synchronize_irq(adapter->msix_entries[num_msix].vector); + free_irq(adapter->msix_entries[num_msix].vector, adapter); + } } int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
@@ -760,6 +761,9 @@ int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, int cmd_type, err, opcode; unsigned long timeout; + if (!mbx) + return -EIO; + opcode = LSW(cmd->req.arg[0]); cmd_type = cmd->type; err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
@@ -902,7 +906,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) QLCNIC_MBX_RSP(event[0])); break; case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT: - qlcnic_dcb_handle_aen(adapter, (void *)&event[1]); + qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]); break; default: dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -979,14 +983,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
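The vector budget computed in qlcnic_83xx_setup_intr() above is simply rings plus overhead: one vector per SDS (receive/status) ring, one for the AEN mailbox interrupt, and one per Tx ring unless Tx completions share the Rx vectors. A sketch of the same arithmetic; the struct and flag names are illustrative:

#include <stdio.h>

struct ring_cfg {
	unsigned int sds_rings;	/* receive/status rings */
	unsigned int tx_rings;
	int tx_intr_shared;	/* Tx completions ride the SDS vectors */
};

static unsigned int msix_vectors_needed(const struct ring_cfg *cfg)
{
	unsigned int n = cfg->sds_rings;

	n += 1;	/* one extra vector for the AEN/mailbox interrupt */
	if (!cfg->tx_intr_shared)
		n += cfg->tx_rings;

	return n;
}

int main(void)
{
	struct ring_cfg cfg = { 4, 4, 0 };

	printf("%u vectors\n", msix_vectors_needed(&cfg));	/* 4 + 1 + 4 = 9 */
	return 0;
}

With the vectors granted, qlcnic_83xx_add_rings() then describes each extra status ring to the firmware through the mailbox layout whose size is computed first:

sds_mbx_size = sizeof(struct 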
qlcnic_sds_mbx); context_id = recv_ctx->context_id; - num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS); + num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; ahw->hw_ops->alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_ADD_RCV_RINGS); cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ index = 2; - for (i = 8; i < adapter->max_sds_rings; i++) { + for (i = 8; i < adapter->drv_sds_rings; i++) { memset(&sds_mbx, 0, sds_mbx_size); sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; @@ -1021,7 +1025,7 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1]; index = 0; /* status descriptor ring */ - for (i = 8; i < adapter->max_sds_rings; i++) { + for (i = 8; i < adapter->drv_sds_rings; i++) { sds = &recv_ctx->sds_rings[i]; sds->crb_sts_consumer = ahw->pci_base0 + mbx_out->host_csmr[index]; @@ -1079,10 +1083,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; num_rds = adapter->max_rds_rings; - if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS) - num_sds = adapter->max_sds_rings; + if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS) + num_sds = adapter->drv_sds_rings; else - num_sds = QLCNIC_MAX_RING_SETS; + num_sds = QLCNIC_MAX_SDS_RINGS; sds_mbx_size = sizeof(struct qlcnic_sds_mbx); rds_mbx_size = sizeof(struct qlcnic_rds_mbx); @@ -1183,7 +1187,7 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) sds->crb_intr_mask = ahw->pci_base0 + intr_mask; } - if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS) + if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS) err = qlcnic_83xx_add_rings(adapter); out: qlcnic_free_mbx_args(&cmd); @@ -1239,9 +1243,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, mbx.size = tx->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) { if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) - msix_vector = adapter->max_sds_rings + ring; + msix_vector = adapter->drv_sds_rings + ring; else - msix_vector = adapter->max_sds_rings - 1; + msix_vector = adapter->drv_sds_rings - 1; msix_id = ahw->intr_tbl[msix_vector].id; } else { msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); @@ -1264,7 +1268,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp); cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; - cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp; + cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp; + buf = &cmd.req.arg[6]; memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); /* send the mailbox command*/ @@ -1279,7 +1284,7 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, tx->ctx_id = mbx_out->ctx_id; if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { - intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; + intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src; tx->crb_intr_mask = ahw->pci_base0 + intr_mask; } dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n", @@ -1290,7 +1295,7 @@ out: } static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, - int num_sds_ring) + u8 num_sds_ring) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; @@ -1306,7 +1311,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, qlcnic_detach(adapter); - adapter->max_sds_rings = 1; + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 
0; @@ -1320,7 +1325,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, if (ret) { qlcnic_detach(adapter); if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { - adapter->max_sds_rings = num_sds_ring; + adapter->drv_sds_rings = num_sds_ring; qlcnic_attach(adapter); } netif_device_attach(netdev); @@ -1333,7 +1338,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, } if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_83xx_enable_intr(adapter, sds_ring); } @@ -1354,7 +1359,7 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, } static void qlcnic_83xx_diag_free_res(struct net_device *netdev, - int max_sds_rings) + u8 drv_sds_rings) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; @@ -1362,7 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_83xx_disable_intr(adapter, sds_ring); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) @@ -1386,7 +1391,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, } } adapter->ahw->diag_test = 0; - adapter->max_sds_rings = max_sds_rings; + adapter->drv_sds_rings = drv_sds_rings; if (qlcnic_attach(adapter)) goto out; @@ -1648,7 +1653,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; - int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings; + u8 drv_sds_rings = adapter->drv_sds_rings; + u8 drv_tx_rings = adapter->drv_tx_rings; + int ret = 0, loop = 0; if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, @@ -1670,7 +1677,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, - max_sds_rings); + drv_sds_rings); if (ret) goto fail_diag_alloc; @@ -1708,10 +1715,11 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) qlcnic_83xx_clear_lb_mode(adapter, mode); free_diag_res: - qlcnic_83xx_diag_free_res(netdev, max_sds_rings); + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_alloc: - adapter->max_sds_rings = max_sds_rings; + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; qlcnic_release_diag_lock(adapter); return ret; } @@ -1722,7 +1730,7 @@ static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter, struct qlcnic_hardware_context *ahw = adapter->ahw; int temp; - netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n", + netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n", ahw->extend_lb_time); temp = ahw->extend_lb_time * 1000; *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT; @@ -2276,9 +2284,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; npar_info->max_linkspeed_reg_offset = temp; } - if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) - memcpy(ahw->extra_capability, &cmd.rsp.arg[16], - sizeof(ahw->extra_capability)); + + memcpy(ahw->extra_capability, &cmd.rsp.arg[16], + sizeof(ahw->extra_capability)); out: qlcnic_free_mbx_args(&cmd); @@ -2321,19 +2329,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, i++; memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); i = i + 3; - if (ahw->op_mode == QLCNIC_MGMT_FUNC) - dev_info(dev, "id = %d active = %d type = %d\n" - "\tport = %d min bw = %d max bw = %d\n" - "\tmac_addr = %pM\n", pci_info->id, - pci_info->active, pci_info->type, - pci_info->default_port, - pci_info->tx_min_bw, - pci_info->tx_max_bw, pci_info->mac); } - if (ahw->op_mode == QLCNIC_MGMT_FUNC) - dev_info(dev, "Max functions = %d, active functions = %d\n", - ahw->max_pci_func, ahw->act_pci_func); - } else { dev_err(dev, "Failed to get PCI Info, error = %d\n", err); err = -EIO; @@ -3061,11 +3057,14 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, int status = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; - /* Get port configuration info */ - status = qlcnic_83xx_get_port_info(adapter); - /* Get Link Status related info */ - config = qlcnic_83xx_test_link(adapter); - ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); + if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + /* Get port configuration info */ + status = qlcnic_83xx_get_port_info(adapter); + /* Get Link Status related info */ + config = qlcnic_83xx_test_link(adapter); + ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); + } + /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; @@ -3279,12 +3278,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) return 0; } -int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) +inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) { return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) * - sizeof(adapter->ahw->ext_reg_tbl)) + - (ARRAY_SIZE(qlcnic_83xx_reg_tbl) + - sizeof(adapter->ahw->reg_tbl)); + sizeof(*adapter->ahw->ext_reg_tbl)) + + (ARRAY_SIZE(qlcnic_83xx_reg_tbl) * + sizeof(*adapter->ahw->reg_tbl)); } int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) @@ -3305,10 +3304,11 @@ int 
qlcnic_83xx_interrupt_test(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; + u8 val, drv_sds_rings = adapter->drv_sds_rings; + u8 drv_tx_rings = adapter->drv_tx_rings; u32 data; u16 intrpt_id, id; - u8 val; - int ret, max_sds_rings = adapter->max_sds_rings; + int ret; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting\n"); @@ -3321,7 +3321,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) } ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, - max_sds_rings); + drv_sds_rings); if (ret) goto fail_diag_irq; @@ -3358,10 +3358,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); - qlcnic_83xx_diag_free_res(netdev, max_sds_rings); + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: - adapter->max_sds_rings = max_sds_rings; + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; qlcnic_release_diag_lock(adapter); return ret; } @@ -3381,10 +3382,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter, } config = ahw->port_config; if (config & QLC_83XX_CFG_STD_PAUSE) { - if (config & QLC_83XX_CFG_STD_TX_PAUSE) + switch (MSW(config)) { + case QLC_83XX_TX_PAUSE: + pause->tx_pause = 1; + break; + case QLC_83XX_RX_PAUSE: + pause->rx_pause = 1; + break; + case QLC_83XX_TX_RX_PAUSE: + default: + /* Backward compatibility for existing + * flash definitions + */ pause->tx_pause = 1; - if (config & QLC_83XX_CFG_STD_RX_PAUSE) pause->rx_pause = 1; + } } if (QLC_83XX_AUTONEG(config)) @@ -3427,7 +3439,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter, ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE; ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE; } else if (!pause->rx_pause && !pause->tx_pause) { - ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE; + ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE | + QLC_83XX_CFG_STD_PAUSE); } status = qlcnic_83xx_set_port_config(adapter); if (status) { @@ -3503,7 +3516,7 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) if (err) return err; - if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) { + if (ahw->nic_mode == QLCNIC_VNIC_MODE) { if (ahw->op_mode == QLCNIC_MGMT_FUNC) { qlcnic_83xx_set_vnic_opmode(adapter); } else { @@ -3524,12 +3537,15 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) { - INIT_COMPLETION(mbx->completion); + reinit_completion(&mbx->completion); set_bit(QLC_83XX_MBX_READY, &mbx->status); } void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx) { + if (!mbx) + return; + destroy_workqueue(mbx->work_q); kfree(mbx); } @@ -3650,6 +3666,9 @@ void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + if (!mbx) + return; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); complete(&mbx->completion); cancel_work_sync(&mbx->work); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 533e150503a..4cae6caa6bf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -61,7 +61,6 @@ #define QLC_83XX_HOST_SDS_MBX_IDX 8 #define QLCNIC_HOST_RDS_MBX_IDX 88 -#define QLCNIC_MAX_RING_SETS 8 /* Pause control registers */ #define QLC_83XX_SRE_SHIM_REG 0x0D200284 @@ -183,8 +182,8 @@ struct qlcnic_rcv_mbx_out { 
u8 num_pci_func; u8 state; #endif - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; + u32 host_csmr[QLCNIC_MAX_SDS_RINGS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS]; } __packed; struct qlcnic_add_rings_mbx_out { @@ -197,8 +196,8 @@ struct qlcnic_add_rings_mbx_out { u8 sts_num; u8 rcv_num; #endif - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; + u32 host_csmr[QLCNIC_MAX_SDS_RINGS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS]; } __packed; /* Transmit context mailbox inbox registers @@ -363,6 +362,9 @@ enum qlcnic_83xx_states { #define QLC_83XX_LINK_EEE(data) ((data) & BIT_13) #define QLC_83XX_DCBX(data) (((data) >> 28) & 7) #define QLC_83XX_AUTONEG(data) ((data) & BIT_15) +#define QLC_83XX_TX_PAUSE 0x10 +#define QLC_83XX_RX_PAUSE 0x20 +#define QLC_83XX_TX_RX_PAUSE 0x30 #define QLC_83XX_CFG_STD_PAUSE (1 << 5) #define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20) #define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20) @@ -412,8 +414,6 @@ enum qlcnic_83xx_states { #define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) #define QLC_83XX_ESWITCH_CAPABILITY BIT_23 -#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF -#define QLC_83XX_DEFAULT_MODE 0x0 #define QLC_83XX_SRIOV_MODE 0x1 #define QLCNIC_BRDTYPE_83XX_10G 0x0083 @@ -521,7 +521,7 @@ enum qlc_83xx_ext_regs { /* 83xx funcitons */ int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); -int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int); +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *); void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *); @@ -626,7 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); -int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int); +int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f09e787af0b..89208e5b25d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); - qlcnic_dcb_get_info(adapter); + qlcnic_dcb_get_info(adapter->dcb); qlcnic_83xx_idc_attach_driver(adapter); return 0; @@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; int ret = 0; + u32 owner; u32 val; /* Perform NIC configuration based ready state entry actions */ @@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); + } else { + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); 
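The new QLC_83XX_{TX,RX,TX_RX}_PAUSE values above line up with the existing QLC_83XX_CFG_STD_*_PAUSE encodings: (1 << 20), (2 << 20) and (3 << 20) become 0x10, 0x20 and 0x30 once the upper half of the 32-bit port_config word is extracted, which is what the reworked qlcnic_83xx_get_pauseparam() switches on. A small compilable sketch, assuming MSW() takes the most significant 16 bits:

#include <stdio.h>
#include <stdint.h>

#define MSW(x)               ((uint16_t)((uint32_t)(x) >> 16))
#define QLC_83XX_TX_PAUSE    0x10
#define QLC_83XX_RX_PAUSE    0x20
#define QLC_83XX_TX_RX_PAUSE 0x30

static void decode_pause(uint32_t port_config, int *tx, int *rx)
{
	*tx = *rx = 0;
	switch (MSW(port_config)) {
	case QLC_83XX_TX_PAUSE:
		*tx = 1;
		break;
	case QLC_83XX_RX_PAUSE:
		*rx = 1;
		break;
	case QLC_83XX_TX_RX_PAUSE:
	default:
		/* Backward compatibility: old flash images report both */
		*tx = *rx = 1;
	}
}

int main(void)
{
	int tx, rx;

	decode_pause(1u << 20, &tx, &rx);	/* STD_TX_PAUSE image */
	printf("tx=%d rx=%d\n", tx, rx);	/* tx=1 rx=0 */
	return 0;
}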
+ if (ahw->pci_func == owner) + qlcnic_dump_fw(adapter); } return -EIO; } @@ -897,7 +902,7 @@ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); clear_bit(QLC_83XX_MBX_READY, &mbx->status); - if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) + if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); if (qlcnic_check_diag_status(adapter)) { @@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; qlcnic_83xx_periodic_tasks(adapter); + /* Do not reschedule if firmware is in a hung state and auto + * recovery is disabled + */ + if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset) + return; + /* Re-schedule the function */ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, @@ -2022,6 +2033,8 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) ahw->max_mac_filters = nic_info.max_mac_filters; ahw->max_mtu = nic_info.max_mtu; + adapter->max_tx_rings = ahw->max_tx_ques; + adapter->max_sds_rings = ahw->max_rx_ques; /* eSwitch capability indicates vNIC mode. * vNIC and SRIOV are mutually exclusive operational modes. * If SR-IOV capability is detected, SR-IOV physical function @@ -2034,7 +2047,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) return QLC_83XX_DEFAULT_OPMODE; if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) - return QLC_83XX_VIRTUAL_NIC_MODE; + return QLCNIC_VNIC_MODE; return QLC_83XX_DEFAULT_OPMODE; } @@ -2048,15 +2061,20 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) if (ret == -EIO) return -EIO; - if (ret == QLC_83XX_VIRTUAL_NIC_MODE) { - ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; + if (ret == QLCNIC_VNIC_MODE) { + ahw->nic_mode = QLCNIC_VNIC_MODE; + if (qlcnic_83xx_config_vnic_opmode(adapter)) return -EIO; + adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; + adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; } else if (ret == QLC_83XX_DEFAULT_OPMODE) { - ahw->nic_mode = QLC_83XX_DEFAULT_MODE; + ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + adapter->max_sds_rings = ahw->max_rx_ques; + adapter->max_tx_rings = ahw->max_tx_ques; } else { return -EIO; } @@ -2159,13 +2177,34 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) return err; } +static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter) +{ + u8 rx_cnt = QLCNIC_DEF_SDS_RINGS; + u8 tx_cnt = QLCNIC_DEF_TX_RINGS; + + adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + + if (!adapter->ahw->msix_supported) { + rx_cnt = QLCNIC_SINGLE_RING; + tx_cnt = QLCNIC_SINGLE_RING; + } + + /* compute and set driver sds and tx ring counts */ + qlcnic_set_tx_ring_count(adapter, tx_cnt); + qlcnic_set_sds_ring_count(adapter, rx_cnt); +} int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_dcb *dcb; int err = 0; ahw->msix_supported = !!qlcnic_use_msi_x; + + qlcnic_83xx_init_rings(adapter); + err = qlcnic_83xx_init_mailbox_work(adapter); if (err) goto exit; @@ -2178,22 +2217,26 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) return err; } + if
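qlcnic_83xx_init_rings() above centralizes the default ring policy: advertise the 83xx maxima, request the driver defaults, and drop to one ring of each kind when MSI-X is unavailable. A userspace sketch of that policy; the QLCNIC_DEF_* values and the direct assignments are simplified stand-ins for the driver's qlcnic_set_tx_ring_count()/qlcnic_set_sds_ring_count():

#include <stdio.h>
#include <stdint.h>

#define QLCNIC_SINGLE_RING	1
#define QLCNIC_DEF_SDS_RINGS	4	/* assumed default */
#define QLCNIC_DEF_TX_RINGS	4	/* assumed default */
#define QLCNIC_MAX_SDS_RINGS	8
#define QLCNIC_MAX_TX_RINGS	8

struct adapter {
	int msix_supported;
	uint8_t max_sds_rings, max_tx_rings;
	uint8_t drv_sds_rings, drv_tx_rings;
};

static void init_rings(struct adapter *a)
{
	uint8_t rx_cnt = QLCNIC_DEF_SDS_RINGS;
	uint8_t tx_cnt = QLCNIC_DEF_TX_RINGS;

	a->max_tx_rings = QLCNIC_MAX_TX_RINGS;
	a->max_sds_rings = QLCNIC_MAX_SDS_RINGS;

	if (!a->msix_supported) {
		/* Legacy INTx or MSI: a single ring of each kind */
		rx_cnt = QLCNIC_SINGLE_RING;
		tx_cnt = QLCNIC_SINGLE_RING;
	}

	a->drv_tx_rings = tx_cnt;	/* stand-in for qlcnic_set_tx_ring_count() */
	a->drv_sds_rings = rx_cnt;	/* stand-in for qlcnic_set_sds_ring_count() */
}

int main(void)
{
	struct adapter a = { .msix_supported = 0 };

	init_rings(&a);
	printf("sds=%u tx=%u\n", a.drv_sds_rings, a.drv_tx_rings);	/* 1 1 */
	return 0;
}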
(qlcnic_83xx_read_flash_descriptor_table(adapter) || + qlcnic_83xx_read_flash_mfg_id(adapter)) { + dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n"); + err = -ENOTRECOVERABLE; + goto detach_mbx; + } + err = qlcnic_83xx_check_hw_status(adapter); if (err) goto detach_mbx; - if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) - qlcnic_83xx_read_flash_mfg_id(adapter); - err = qlcnic_83xx_get_fw_info(adapter); if (err) goto detach_mbx; err = qlcnic_83xx_idc_init(adapter); if (err) - goto clear_fw_info; + goto detach_mbx; - err = qlcnic_setup_intr(adapter, 0, 0); + err = qlcnic_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto disable_intr; @@ -2215,13 +2258,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (err) goto disable_mbx_intr; + /* Perform operating mode specific initialization */ err = adapter->nic_ops->init_driver(adapter); if (err) goto disable_mbx_intr; - if (adapter->dcb && qlcnic_dcb_attach(adapter)) - qlcnic_clear_dcb_ops(adapter); + dcb = adapter->dcb; + + if (dcb && qlcnic_dcb_attach(dcb)) + qlcnic_clear_dcb_ops(dcb); /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); @@ -2233,12 +2279,10 @@ disable_mbx_intr: disable_intr: qlcnic_teardown_intr(adapter); -clear_fw_info: - kfree(ahw->fw_info); - detach_mbx: qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); + ahw->mailbox = NULL; exit: return err; } @@ -2251,7 +2295,7 @@ void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter) clear_bit(QLC_83XX_MBX_READY, &idc->status); cancel_delayed_work_sync(&adapter->fw_work); - if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) + if (ahw->nic_mode == QLCNIC_VNIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); qlcnic_83xx_idc_detach_driver(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index 0248a4c2f5d..734d28602ac 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter) **/ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) { - int err = -EIO; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_npar_info *npar; + int i, err = -EIO; qlcnic_83xx_get_minidump_template(adapter); + if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) { if (qlcnic_init_pci_info(adapter)) return err; + npar = adapter->npars; + + for (i = 0; i < ahw->act_pci_func; i++, npar++) { + dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n", + npar->pci_func, npar->active, npar->type, + npar->phy_port, npar->min_bw, npar->max_bw, + npar->mac); + } + + dev_info(dev, "Max functions = %d, active functions = %d\n", + ahw->max_pci_func, ahw->act_pci_func); + if (qlcnic_83xx_set_vnic_opmode(adapter)) return err; @@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) return err; qlcnic_83xx_config_vnic_buff_descriptors(adapter); - adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + ahw->msix_supported = qlcnic_use_msi_x ? 
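Two small hardening changes meet here: qlcnic_83xx_free_mailbox() and qlcnic_83xx_detach_mailbox_work() now tolerate a NULL mailbox, and the init error path clears ahw->mailbox after freeing it, so a later teardown cannot double-free. A minimal userspace sketch of the pattern:

#include <stdlib.h>

struct qlcnic_mailbox { int dummy; };
struct hw_ctx { struct qlcnic_mailbox *mailbox; };

static void free_mailbox(struct qlcnic_mailbox *mbx)
{
	if (!mbx)		/* new guard: safe when attach never ran */
		return;
	free(mbx);
}

static void init_error_path(struct hw_ctx *ahw)
{
	free_mailbox(ahw->mailbox);
	ahw->mailbox = NULL;	/* later teardown sees NULL, not a stale pointer */
}

int main(void)
{
	struct hw_ctx ahw = { .mailbox = malloc(sizeof(*ahw.mailbox)) };

	init_error_path(&ahw);
	free_mailbox(ahw.mailbox);	/* harmless no-op instead of double free */
	return 0;
}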
1 : 0; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; qlcnic_83xx_enable_vnic_mode(adapter, 1); - dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n", - adapter->ahw->fw_hal_version); + dev_info(dev, "HAL Version: %d, Management function\n", + ahw->fw_hal_version); return 0; } @@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter, - int func, int *port_id) +int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter, + int func, int *port_id) { struct qlcnic_info nic_info; int err = 0; @@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter, else err = -EIO; - return err; -} - -int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func) -{ - int id, err = 0; - - err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id); - if (err) - return err; - - if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) { - if (!qlcnic_enable_eswitch(adapter, id, 1)) - adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE; - else - err = -EIO; - } + if (!err) + adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE; return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 86850dd633a..859cb161fc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -270,7 +270,7 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) int err; nrds_rings = adapter->max_rds_rings; - nsds_rings = adapter->max_sds_rings; + nsds_rings = adapter->drv_sds_rings; rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, nsds_rings); @@ -475,7 +475,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { - temp_nsds_rings = adapter->max_sds_rings; + temp_nsds_rings = adapter->drv_sds_rings; index = temp_nsds_rings + ring; msix_id = ahw->intr_tbl[index].id; prq->msi_index = cpu_to_le16(msix_id); @@ -512,7 +512,7 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) { - index = adapter->max_sds_rings + ring; + index = adapter->drv_sds_rings + ring; intr_mask = ahw->intr_tbl[index].src; tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask; } @@ -582,7 +582,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, @@ -616,7 +616,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) } - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, @@ -664,7 +664,7 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) if (err) goto err_out; - for (ring = 0; ring < dev->max_drv_tx_rings; ring++) { + for (ring = 0; ring < dev->drv_tx_rings; ring++) { err = qlcnic_fw_cmd_create_tx_ctx(dev, &dev->tx_ring[ring], ring); @@ -703,7 +703,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, 
&adapter->state)) { qlcnic_fw_cmd_del_rx_ctx(adapter); - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) + for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_fw_cmd_del_tx_ctx(adapter, &adapter->tx_ring[ring]); @@ -733,7 +733,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) recv_ctx = adapter->recv_ctx; - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring->hw_consumer != NULL) { dma_free_coherent(&adapter->pdev->dev, sizeof(u32), @@ -764,7 +764,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) } } - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (sds_ring->desc_head != NULL) { @@ -895,6 +895,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); npar_info->capabilities = le32_to_cpu(nic_info->capabilities); npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); + adapter->max_tx_rings = npar_info->max_tx_ques; + adapter->max_sds_rings = npar_info->max_rx_ques; } qlcnic_free_mbx_args(&cmd); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index d62d5ce432e..86bca7c14f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops; static void qlcnic_dcb_aen_work(struct work_struct *); static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); -static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *); -static void __qlcnic_dcb_free(struct qlcnic_adapter *); -static int __qlcnic_dcb_attach(struct qlcnic_adapter *); -static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *); -static void __qlcnic_dcb_get_info(struct qlcnic_adapter *); - -static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *); -static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); -static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *); -static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *); - -static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *); -static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8); -static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *); -static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool); -static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *); +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *); +static void __qlcnic_dcb_free(struct qlcnic_dcb *); +static int __qlcnic_dcb_attach(struct qlcnic_dcb *); +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *); +static void __qlcnic_dcb_get_info(struct qlcnic_dcb *); + +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *); +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *); +static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *); + +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *); +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *); +static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool); 
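The loops above show the naming convention this series installs throughout the driver: max_* fields hold the ceiling the firmware reports (or a hard driver limit), while the new drv_* fields hold what is currently configured, and resource loops now iterate the drv_* count. A sketch of the split:

#include <stdio.h>

struct adapter {
	int max_sds_rings;	/* firmware/hardware ceiling */
	int drv_sds_rings;	/* currently configured by the driver */
};

static void alloc_hw_resources(const struct adapter *a)
{
	/* Allocate only the rings actually in use, not the maximum */
	for (int ring = 0; ring < a->drv_sds_rings; ring++)
		printf("alloc sds ring %d\n", ring);
}

int main(void)
{
	struct adapter a = { .max_sds_rings = 8, .drv_sds_rings = 4 };

	alloc_hw_resources(&a);
	return 0;
}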
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *); struct qlcnic_dcb_capability { bool tsa_capability; @@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .query_cee_param = qlcnic_83xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, .register_aen = qlcnic_83xx_dcb_register_aen, - .handle_aen = qlcnic_83xx_dcb_handle_aen, + .aen_handler = qlcnic_83xx_dcb_aen_handler, }; static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { @@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, .query_cee_param = qlcnic_82xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, - .handle_aen = qlcnic_82xx_dcb_handle_aen, + .aen_handler = qlcnic_82xx_dcb_aen_handler, }; static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) @@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map) return j; } -static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter) +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb) { - if (test_bit(__QLCNIC_DCB_STATE, &adapter->state)) - adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; + if (test_bit(QLCNIC_DCB_STATE, &dcb->state)) + dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; } static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) @@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) adapter->dcb->ops = &qlcnic_83xx_dcb_ops; } -int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) +int qlcnic_register_dcb(struct qlcnic_adapter *adapter) { struct qlcnic_dcb *dcb; @@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) adapter->dcb = dcb; dcb->adapter = adapter; qlcnic_set_dcb_ops(adapter); + dcb->state = 0; return 0; } -static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) +static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb) { - struct qlcnic_dcb *dcb = adapter->dcb; + struct qlcnic_adapter *adapter; if (!dcb) return; - qlcnic_dcb_register_aen(adapter, 0); + adapter = dcb->adapter; + qlcnic_dcb_register_aen(dcb, 0); - while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) usleep_range(10000, 11000); cancel_delayed_work_sync(&dcb->aen_work); @@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter) adapter->dcb = NULL; } -static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter) +static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) { - qlcnic_dcb_get_hw_capability(adapter); - qlcnic_dcb_get_cee_cfg(adapter); - qlcnic_dcb_register_aen(adapter, 1); + qlcnic_dcb_get_hw_capability(dcb); + qlcnic_dcb_get_cee_cfg(dcb); + qlcnic_dcb_register_aen(dcb, 1); } -static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) +static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb) { - struct qlcnic_dcb *dcb = adapter->dcb; int err = 0; INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); if (!dcb->wq) { - dev_err(&adapter->pdev->dev, + dev_err(&dcb->adapter->pdev->dev, "DCB workqueue allocation failed. 
DCB will be disabled\n"); return -1; } @@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter) goto out_free_cfg; } - qlcnic_dcb_get_info(adapter); + qlcnic_dcb_get_info(dcb); return 0; out_free_cfg: @@ -345,9 +346,9 @@ out_free_wq: return err; } -static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, - char *buf) +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) { + struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_cmd_args cmd; u32 mbx_out; int err; @@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, return err; } -static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val) +static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val) { - struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; + struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); - err = qlcnic_dcb_query_hw_capability(adapter, (char *)val); + err = qlcnic_dcb_query_hw_capability(dcb, (char *)val); if (err) return err; @@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val) if (cap->max_num_tc > QLC_DCB_MAX_TC || cap->max_ets_tc > cap->max_num_tc || cap->max_pfc_tc > cap->max_num_tc) { - dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n"); + dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n"); return -EINVAL; } return err; } -static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { - struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_cfg *cfg = dcb->cfg; struct qlcnic_dcb_capability *cap; u32 mbx_out; int err; - err = __qlcnic_dcb_get_capability(adapter, &mbx_out); + err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; @@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) - set_bit(__QLCNIC_DCB_STATE, &adapter->state); + set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } -static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) { u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); + struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; struct device *dev = &adapter->pdev->dev; dma_addr_t cardrsp_phys_addr; @@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, return -EINVAL; } - addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr, - GFP_KERNEL); + addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; @@ -488,72 +489,67 @@ out: qlcnic_free_mbx_args(&cmd); out_free_rsp: - dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr); + dma_free_coherent(dev, size, addr, cardrsp_phys_addr); return err; } -static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_mbx_params *mbx; int err; - mbx = adapter->dcb->param; + mbx = dcb->param; if (!mbx) return 0; - err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0], + err = 
qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0], QLC_DCB_LOCAL_PARAM_FWID); if (err) return err; - err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1], + err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1], QLC_DCB_OPER_PARAM_FWID); if (err) return err; - err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2], + err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2], QLC_DCB_PEER_PARAM_FWID); if (err) return err; mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; - qlcnic_dcb_data_cee_param_map(adapter); + qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } static void qlcnic_dcb_aen_work(struct work_struct *work) { - struct qlcnic_adapter *adapter; struct qlcnic_dcb *dcb; dcb = container_of(work, struct qlcnic_dcb, aen_work.work); - adapter = dcb->adapter; - qlcnic_dcb_get_cee_cfg(adapter); - clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state); + qlcnic_dcb_get_cee_cfg(dcb); + clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state); } -static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter, - void *data) +static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { - struct qlcnic_dcb *dcb = adapter->dcb; - - if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } -static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { - struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability; + struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; - err = __qlcnic_dcb_get_capability(adapter, &mbx_out); + err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; @@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter) cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) - set_bit(__QLCNIC_DCB_STATE, &adapter->state); + set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } -static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter, +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 idx) { + struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_dcb_mbx_params mbx_out; int err, i, j, k, max_app, size; struct qlcnic_dcb_param *each; @@ -632,24 +629,23 @@ out: return err; } -static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter) +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { - struct qlcnic_dcb *dcb = adapter->dcb; int err; - err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0); + err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0); if (err) return err; - qlcnic_dcb_data_cee_param_map(adapter); + qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } -static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter, - bool flag) +static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag) { u8 val = (flag ? 
QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC); + struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_cmd_args cmd; int err; @@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter, return err; } -static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter, - void *data) +static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { - struct qlcnic_dcb *dcb = adapter->dcb; u32 *val = data; - if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state)) + if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; if (*val & BIT_8) - set_bit(__QLCNIC_DCB_STATE, &adapter->state); + set_bit(QLCNIC_DCB_STATE, &dcb->state); else - clear_bit(__QLCNIC_DCB_STATE, &adapter->state); + clear_bit(QLCNIC_DCB_STATE, &dcb->state); queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } @@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - return test_bit(__QLCNIC_DCB_STATE, &adapter->state); + return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state); } static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) { - memcpy(addr, netdev->dev_addr, netdev->addr_len); + memcpy(addr, netdev->perm_addr, netdev->addr_len); } static void @@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; *prio = *pgid = *bw_per = *up_tc_map = 0; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; @@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, *bw_pct = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; @@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio, *setting = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) || + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->pfc_mode_enable) return; @@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, { struct qlcnic_adapter *adapter = netdev_priv(netdev); - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; switch (capid) { @@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return -EINVAL; switch (attr) { @@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id) .protocol = id, }; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; return dcb_getapp(netdev, &app); @@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb *dcb = adapter->dcb; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &dcb->state)) return 0; return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; @@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct 
net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; return cfg->capability.dcb_capability; @@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *type; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 1; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; @@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev, *app_count = 0; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; @@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev, struct qlcnic_dcb_app *app; int i, j; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; @@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev, struct qlcnic_dcb_cee *peer; u8 i, j, k, map; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; @@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev, pfc->pfc_en = 0; - if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state)) + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &cfg->type[QLC_DCB_PEER_IDX]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index b87ce9fb503..c04ae0cdc10 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -8,26 +8,29 @@ #ifndef __QLCNIC_DCBX_H #define __QLCNIC_DCBX_H -void qlcnic_clear_dcb_ops(struct qlcnic_adapter *); +#define QLCNIC_DCB_STATE 0 +#define QLCNIC_DCB_AEN_MODE 1 #ifdef CONFIG_QLCNIC_DCB -int __qlcnic_register_dcb(struct qlcnic_adapter *); +int qlcnic_register_dcb(struct qlcnic_adapter *); #else -static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter) +static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter) { return 0; } #endif +struct qlcnic_dcb; + struct qlcnic_dcb_ops { - void (*init_dcbnl_ops) (struct qlcnic_adapter *); - void (*free) (struct qlcnic_adapter *); - int (*attach) (struct qlcnic_adapter *); - int (*query_hw_capability) (struct qlcnic_adapter *, char *); - int (*get_hw_capability) (struct qlcnic_adapter *); - void (*get_info) (struct qlcnic_adapter *); - int (*query_cee_param) (struct qlcnic_adapter *, char *, u8); - int (*get_cee_cfg) (struct qlcnic_adapter *); - int (*register_aen) (struct qlcnic_adapter *, bool); - void (*handle_aen) (struct qlcnic_adapter *, void *); + int (*query_hw_capability) (struct qlcnic_dcb *, char *); + int (*get_hw_capability) (struct qlcnic_dcb *); + int (*query_cee_param) (struct qlcnic_dcb *, char *, u8); + void (*init_dcbnl_ops) (struct qlcnic_dcb *); + int (*register_aen) (struct qlcnic_dcb *, bool); + void (*aen_handler) (struct qlcnic_dcb *, void *); + int (*get_cee_cfg) (struct qlcnic_dcb *); + void (*get_info) (struct qlcnic_dcb *); + int (*attach) (struct qlcnic_dcb *); + void (*free) (struct 
qlcnic_dcb *); }; struct qlcnic_dcb { @@ -37,5 +40,85 @@ struct qlcnic_dcb { struct workqueue_struct *wq; struct qlcnic_dcb_ops *ops; struct qlcnic_dcb_cfg *cfg; + unsigned long state; }; + +static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) +{ + kfree(dcb); + dcb = NULL; +} + +static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_hw_capability) + return dcb->ops->get_hw_capability(dcb); + + return 0; +} + +static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->free) + dcb->ops->free(dcb); +} + +static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->attach) + return dcb->ops->attach(dcb); + + return 0; +} + +static inline int +qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) +{ + if (dcb && dcb->ops->query_hw_capability) + return dcb->ops->query_hw_capability(dcb, buf); + + return 0; +} + +static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_info) + dcb->ops->get_info(dcb); +} + +static inline int +qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) +{ + if (dcb && dcb->ops->query_cee_param) + return dcb->ops->query_cee_param(dcb, buf, type); + + return 0; +} + +static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_cee_cfg) + return dcb->ops->get_cee_cfg(dcb); + + return 0; +} + +static inline void +qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag) +{ + if (dcb && dcb->ops->register_aen) + dcb->ops->register_aen(dcb, flag); +} + +static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg) +{ + if (dcb && dcb->ops->aen_handler) + dcb->ops->aen_handler(dcb, msg); +} + +static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->init_dcbnl_ops) + dcb->ops->init_dcbnl_ops(dcb); +} #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index ebe4c86e523..b36c02fafcf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -27,43 +27,36 @@ static const u32 qlcnic_fw_dump_level[] = { }; static const struct qlcnic_stats qlcnic_gstrings_stats[] = { + {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, + {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, {"xmit_called", QLC_SIZEOF(stats.xmitcalled), - QLC_OFF(stats.xmitcalled)}, + QLC_OFF(stats.xmitcalled)}, {"xmit_finished", QLC_SIZEOF(stats.xmitfinished), - QLC_OFF(stats.xmitfinished)}, - {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, + QLC_OFF(stats.xmitfinished)}, + {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), + QLC_OFF(stats.tx_dma_map_error)}, + {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, - {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, + {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), + QLC_OFF(stats.rx_dma_map_error)}, {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, - {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, - {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, + {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, + {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, + 
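The block of static inlines above replaces direct calls into the DCB code with NULL-tolerant dispatch: callers no longer need to know whether DCB was registered, since a missing dcb object or ops slot degrades to a harmless no-op or a zero return. A minimal compilable sketch of the pattern:

#include <stdio.h>

struct dcb;

struct dcb_ops {
	int (*get_hw_capability)(struct dcb *);
};

struct dcb {
	struct dcb_ops *ops;
};

static inline int dcb_get_hw_capability(struct dcb *dcb)
{
	if (dcb && dcb->ops->get_hw_capability)
		return dcb->ops->get_hw_capability(dcb);

	return 0;	/* absent DCB reads as "no capability", not an error */
}

int main(void)
{
	/* Safe even when DCB was never allocated */
	printf("%d\n", dcb_get_hw_capability(NULL));
	return 0;
}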
{"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, + {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, - {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, - {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), QLC_OFF(stats.skb_alloc_failure)}, - {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, - {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), - QLC_OFF(stats.rx_dma_map_error)}, - {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), - QLC_OFF(stats.tx_dma_map_error)}, {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), - QLC_OFF(stats.mac_filter_limit_overrun)}, + QLC_OFF(stats.mac_filter_limit_overrun)}, {"spurious intr", QLC_SIZEOF(stats.spurious_intr), QLC_OFF(stats.spurious_intr)}, }; static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { - "rx unicast frames", - "rx multicast frames", - "rx broadcast frames", - "rx dropped frames", - "rx errors", - "rx local frames", - "rx numbytes", "tx unicast frames", "tx multicast frames", "tx broadcast frames", @@ -71,6 +64,13 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { "tx errors", "tx local frames", "tx numbytes", + "rx unicast frames", + "rx multicast frames", + "rx broadcast frames", + "rx dropped frames", + "rx errors", + "rx local frames", + "rx numbytes", }; static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = { @@ -126,13 +126,16 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = { #define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) -static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = { +static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = { "xmit_on", "xmit_off", "xmit_called", "xmit_finished", + "tx_bytes", }; +#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings) + static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_rx_bytes", "ctx_rx_pkts", @@ -187,8 +190,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) return -1; } -#define QLCNIC_RING_REGS_COUNT 20 -#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32)) +#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 + #define QLCNIC_MAX_EEPROM_LEN 1024 static const u32 diag_registers[] = { @@ -219,7 +222,15 @@ static const u32 ext_diag_registers[] = { }; #define QLCNIC_MGMT_API_VERSION 2 -#define QLCNIC_ETHTOOL_REGS_VER 3 +#define QLCNIC_ETHTOOL_REGS_VER 4 + +static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter) +{ + int ring_regs_cnt = (adapter->drv_tx_rings * 5) + + (adapter->max_rds_rings * 2) + + (adapter->drv_sds_rings * 3) + 5; + return ring_regs_cnt * sizeof(u32); +} static int qlcnic_get_regs_len(struct net_device *dev) { @@ -231,7 +242,9 @@ static int qlcnic_get_regs_len(struct net_device *dev) else len = sizeof(ext_diag_registers) + sizeof(diag_registers); - return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1; + len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32)); + len += qlcnic_get_ring_regs_len(adapter); + return len; } static int qlcnic_get_eeprom_len(struct net_device *dev) @@ -493,6 +506,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) struct qlcnic_adapter *adapter = netdev_priv(dev); struct qlcnic_recv_context 
*recv_ctx = adapter->recv_ctx; struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_rings; + struct qlcnic_host_tx_ring *tx_ring; u32 *regs_buff = p; int ring, i = 0; @@ -512,21 +527,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) return; - regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/ - - regs_buff[i++] = 1; /* No. of tx ring */ - regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); - regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer); - - regs_buff[i++] = 2; /* No. of rx ring */ - regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer); - regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer); + /* Marker btw regs and TX ring count */ + regs_buff[i++] = 0xFFEFCDAB; + + regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */ + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer)); + regs_buff[i++] = tx_ring->sw_consumer; + regs_buff[i++] = readl(tx_ring->crb_cmd_producer); + regs_buff[i++] = tx_ring->producer; + if (tx_ring->crb_intr_mask) + regs_buff[i++] = readl(tx_ring->crb_intr_mask); + else + regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED; + } - regs_buff[i++] = adapter->max_sds_rings; + regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */ + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_rings = &recv_ctx->rds_rings[ring]; + regs_buff[i++] = readl(rds_rings->crb_rcv_producer); + regs_buff[i++] = rds_rings->producer; + } - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */ + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &(recv_ctx->sds_rings[ring]); regs_buff[i++] = readl(sds_ring->crb_sts_consumer); + regs_buff[i++] = sds_ring->consumer; + regs_buff[i++] = readl(sds_ring->crb_intr_mask); } } @@ -635,46 +664,88 @@ qlcnic_set_ringparam(struct net_device *dev, return qlcnic_reset_context(adapter); } +static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, + u8 rx_ring, u8 tx_ring) +{ + if (rx_ring != 0) { + if (rx_ring > adapter->max_sds_rings) { + netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", + rx_ring, adapter->max_sds_rings); + return -EINVAL; + } + } + + if (tx_ring != 0) { + if (qlcnic_82xx_check(adapter) && + (tx_ring > adapter->max_tx_rings)) { + netdev_err(adapter->netdev, + "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", + tx_ring, adapter->max_tx_rings); + return -EINVAL; + } + + if (qlcnic_83xx_check(adapter) && + (tx_ring > QLCNIC_SINGLE_RING)) { + netdev_err(adapter->netdev, + "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n", + tx_ring, QLCNIC_SINGLE_RING); + return -EINVAL; + } + } + + return 0; +} + static void qlcnic_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct qlcnic_adapter *adapter = netdev_priv(dev); - int min; - - min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus()); - channel->max_rx = rounddown_pow_of_two(min); - channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus()); - channel->rx_count = adapter->max_sds_rings; - channel->tx_count = adapter->max_drv_tx_rings; + channel->max_rx = adapter->max_sds_rings; + channel->max_tx = adapter->max_tx_rings; + channel->rx_count = 
adapter->drv_sds_rings; + channel->tx_count = adapter->drv_tx_rings; } static int qlcnic_set_channels(struct net_device *dev, - struct ethtool_channels *channel) + struct ethtool_channels *channel) { struct qlcnic_adapter *adapter = netdev_priv(dev); int err; - int txq = 0; if (channel->other_count || channel->combined_count) return -EINVAL; + err = qlcnic_validate_ring_count(adapter, channel->rx_count, + channel->tx_count); + if (err) + return err; + if (channel->rx_count) { - err = qlcnic_validate_max_rss(adapter, channel->rx_count); - if (err) + err = qlcnic_validate_rings(adapter, channel->rx_count, + QLCNIC_RX_QUEUE); + if (err) { + netdev_err(dev, "Unable to configure %u SDS rings\n", + channel->rx_count); return err; + } } if (channel->tx_count) { - err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); - if (err) + err = qlcnic_validate_rings(adapter, channel->tx_count, + QLCNIC_TX_QUEUE); + if (err) { + netdev_err(dev, "Unable to configure %u Tx rings\n", + channel->tx_count); return err; - txq = channel->tx_count; + } } - err = qlcnic_set_max_rss(adapter, channel->rx_count, txq); - netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n", - adapter->max_sds_rings, adapter->max_drv_tx_rings); + err = qlcnic_setup_rings(adapter, channel->rx_count, + channel->tx_count); + netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n", + adapter->drv_sds_rings, adapter->drv_tx_rings); + return err; } @@ -876,7 +947,7 @@ static int qlcnic_irq_test(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; - int ret, max_sds_rings = adapter->max_sds_rings; + int ret, drv_sds_rings = adapter->drv_sds_rings; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_interrupt_test(netdev); @@ -905,10 +976,10 @@ done: qlcnic_free_mbx_args(&cmd); free_diag_res: - qlcnic_diag_free_res(netdev, max_sds_rings); + qlcnic_diag_free_res(netdev, drv_sds_rings); clear_diag_irq: - adapter->max_sds_rings = max_sds_rings; + adapter->drv_sds_rings = drv_sds_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; @@ -984,8 +1055,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) int qlcnic_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - int max_drv_tx_rings = adapter->max_drv_tx_rings; - int max_sds_rings = adapter->max_sds_rings; + int drv_tx_rings = adapter->drv_tx_rings; + int drv_sds_rings = adapter->drv_sds_rings; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_hardware_context *ahw = adapter->ahw; int loop = 0; @@ -1040,11 +1111,11 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode) qlcnic_clear_lb_mode(adapter, mode); free_res: - qlcnic_diag_free_res(netdev, max_sds_rings); + qlcnic_diag_free_res(netdev, drv_sds_rings); clear_it: - adapter->max_sds_rings = max_sds_rings; - adapter->max_drv_tx_rings = max_drv_tx_rings; + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; } @@ -1097,11 +1168,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) QLCNIC_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: - num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings); - for (i = 0; i < adapter->max_drv_tx_rings; i++) { + num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings); + for (i = 0; i < adapter->drv_tx_rings; i++) { for (index = 0; index < num_stats; index++) { - sprintf(data, 
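The reworked set_channels path above validates in two stages before anything is torn down: qlcnic_validate_ring_count() checks the request against the adapter maxima (with 83xx parts limited to a single TX ring), then qlcnic_validate_rings() applies per-queue-type driver rules, and only then does qlcnic_setup_rings() reallocate. A sketch of the first stage; the error value and the is_83xx flag are simplifications:

#include <stdio.h>

#define QLCNIC_SINGLE_RING 1

struct adapter {
	int max_sds_rings, max_tx_rings;
	int is_83xx;	/* stand-in for qlcnic_83xx_check() */
};

static int validate_ring_count(const struct adapter *a, int rx, int tx)
{
	if (rx != 0 && rx > a->max_sds_rings)
		return -1;	/* SDS request beyond the ceiling */

	if (tx != 0) {
		if (!a->is_83xx && tx > a->max_tx_rings)
			return -1;
		if (a->is_83xx && tx > QLCNIC_SINGLE_RING)
			return -1;	/* 83xx: one TX ring only */
	}

	return 0;	/* zero counts mean "leave unchanged" */
}

int main(void)
{
	struct adapter a = { .max_sds_rings = 8, .max_tx_rings = 8, .is_83xx = 1 };

	printf("%d\n", validate_ring_count(&a, 4, 2));	/* -1: rejected */
	return 0;
}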
"tx_ring_%d %s", i, - qlcnic_tx_ring_stats_strings[index]); + sprintf(data, "tx_queue_%d %s", i, + qlcnic_tx_queue_stats_strings[index]); data += ETH_GSTRING_LEN; } } @@ -1199,6 +1270,36 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) return data; } +static void qlcnic_update_stats(struct qlcnic_adapter *adapter) +{ + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on; + adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off; + adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called; + adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished; + adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes; + } +} + +static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats) +{ + struct qlcnic_host_tx_ring *tx_ring; + + tx_ring = (struct qlcnic_host_tx_ring *)stats; + + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes); + + return data; +} + static void qlcnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -1206,19 +1307,20 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; - int index, ret, length, size, ring; + int index, ret, length, size, tx_size, ring; char *p; - memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64)); - for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) { + tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; + + memset(data, 0, tx_size * sizeof(u64)); + for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { tx_ring = &adapter->tx_ring[ring]; - *data++ = tx_ring->xmit_on; - *data++ = tx_ring->xmit_off; - *data++ = tx_ring->xmit_called; - *data++ = tx_ring->xmit_finished; + data = qlcnic_fill_tx_queue_stats(data, tx_ring); + qlcnic_update_stats(adapter); } } + memset(data, 0, stats->n_stats * sizeof(u64)); length = QLCNIC_STATS_LEN; for (index = 0; index < length; index++) { @@ -1260,7 +1362,7 @@ static int qlcnic_set_led(struct net_device *dev, enum ethtool_phys_id_state state) { struct qlcnic_adapter *adapter = netdev_priv(dev); - int max_sds_rings = adapter->max_sds_rings; + int drv_sds_rings = adapter->drv_sds_rings; int err = -EIO, active = 1; if (qlcnic_83xx_check(adapter)) @@ -1318,7 +1420,7 @@ static int qlcnic_set_led(struct net_device *dev, } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) - qlcnic_diag_free_res(dev, max_sds_rings); + qlcnic_diag_free_res(dev, drv_sds_rings); if (!active || err) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); @@ -1659,7 +1761,6 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; bool valid_mask = false; int i, ret = 0; - u32 state; switch (val->flag) { case QLCNIC_FORCE_FW_DUMP_KEY: @@ -1712,9 +1813,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) case QLCNIC_SET_QUIESCENT: case QLCNIC_RESET_QUIESCENT: - state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) - 
netdev_info(netdev, "Device in FAILED state\n"); + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) + netdev_info(netdev, "Device is in non-operational state\n"); break; default: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index f8adc7b01f1..6f7f60c09f0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, mac_req = (struct qlcnic_mac_req *)&req.words[0]; mac_req->op = op; - memcpy(mac_req->mac_addr, addr, 6); + memcpy(mac_req->mac_addr, addr, ETH_ALEN); vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; vlan_req->vlan_id = cpu_to_le16(vlan_id); @@ -785,8 +785,6 @@ void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter) #define QLCNIC_ENABLE_IPV4_LRO 1 #define QLCNIC_ENABLE_IPV6_LRO 2 -#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8) -#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8) int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) { @@ -806,11 +804,10 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) word = 0; if (enable) { - word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; + word = QLCNIC_ENABLE_IPV4_LRO; if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAP2_HW_LRO_IPV6) - word |= QLCNIC_ENABLE_IPV6_LRO | - QLCNIC_NO_DEST_IPV6_CHECK; + word |= QLCNIC_ENABLE_IPV6_LRO; } req.words[0] = cpu_to_le64(word); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 272c356cf9b..13303e7d1ed 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -146,6 +146,12 @@ struct qlcnic_mailbox_metadata { #define QLCNIC_MBX_PORT_RSP_OK 0x1a #define QLCNIC_MBX_ASYNC_EVENT BIT_15 +/* Set HW Tx ring limit for 82xx adapter. 
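The LRO hunk above stops setting the QLCNIC_NO_DEST_IPV4_CHECK/QLCNIC_NO_DEST_IPV6_CHECK bits, so the firmware validates the destination IP again; only the IPv4 enable remains, plus the IPv6 enable when the firmware advertises hardware IPv6 LRO. A sketch of the resulting configuration word; the capability bit value here is an assumption, not the driver's real QLCNIC_FW_CAP2_HW_LRO_IPV6:

#include <stdio.h>
#include <stdint.h>

#define QLCNIC_ENABLE_IPV4_LRO		1
#define QLCNIC_ENABLE_IPV6_LRO		2
#define QLCNIC_FW_CAP2_HW_LRO_IPV6	(1u << 3)	/* assumed bit */

static uint64_t hw_lro_word(int enable, uint32_t extra_cap0)
{
	uint64_t word = 0;

	if (enable) {
		word = QLCNIC_ENABLE_IPV4_LRO;
		if (extra_cap0 & QLCNIC_FW_CAP2_HW_LRO_IPV6)
			word |= QLCNIC_ENABLE_IPV6_LRO;
	}

	return word;	/* becomes req.words[0] in the driver */
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)hw_lro_word(1, QLCNIC_FW_CAP2_HW_LRO_IPV6));
	return 0;
}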
*/ +#define QLCNIC_MAX_HW_TX_RINGS 8 +#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4 +#define QLCNIC_MAX_TX_RINGS 8 +#define QLCNIC_MAX_SDS_RINGS 8 + struct qlcnic_pci_info; struct qlcnic_info; struct qlcnic_cmd_args; @@ -176,7 +182,7 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8); void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32); -int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int); +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *); irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index 66c26cf7a2b..e9c21e5d0ca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -236,7 +236,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) spin_lock_init(&rds_ring->lock); } - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 11b4bb83b93..0149c949534 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -581,10 +581,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) goto drop_packet; } - if (qlcnic_check_multi_tx(adapter)) - tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)]; - else - tx_ring = &adapter->tx_ring[0]; + tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)]; num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; @@ -607,8 +604,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { netif_tx_start_queue(tx_ring->txq); } else { - adapter->stats.xmit_off++; - tx_ring->xmit_off++; + tx_ring->tx_stats.xmit_off++; return NETDEV_TX_BUSY; } } @@ -669,9 +665,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (adapter->drv_mac_learn) qlcnic_send_filter(adapter, first_desc, skb); - adapter->stats.txbytes += skb->len; - adapter->stats.xmitcalled++; - tx_ring->xmit_called++; + tx_ring->tx_stats.tx_bytes += skb->len; + tx_ring->tx_stats.xmit_called++; qlcnic_update_cmd_producer(tx_ring); @@ -789,6 +784,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; + if (!spin_trylock(&adapter->tx_clean_lock)) + return 1; + sw_consumer = tx_ring->sw_consumer; hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); @@ -805,8 +803,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, PCI_DMA_TODEVICE); frag->dma = 0ULL; } - adapter->stats.xmitfinished++; - tx_ring->xmit_finished++; + tx_ring->tx_stats.xmit_finished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } @@ -823,8 +820,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, netif_carrier_ok(netdev)) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { netif_tx_wake_queue(tx_ring->txq); - adapter->stats.xmit_on++; - tx_ring->xmit_on++; + 
tx_ring->tx_stats.xmit_on++; } } adapter->tx_timeo_cnt = 0; @@ -844,6 +840,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); + spin_unlock(&adapter->tx_clean_lock); return done; } @@ -1011,7 +1008,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index, } break; case QLCNIC_C2H_OPCODE_GET_DCB_AEN: - qlcnic_dcb_handle_aen(adapter, (void *)&msg); + qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg); break; default: break; @@ -1463,18 +1460,18 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; - if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) return -ENOMEM; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test && - (adapter->max_drv_tx_rings > 1)) { + (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) { netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, NAPI_POLL_WEIGHT); } else { - if (ring == (adapter->max_sds_rings - 1)) + if (ring == (adapter->drv_sds_rings - 1)) netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll, NAPI_POLL_WEIGHT); @@ -1491,7 +1488,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, } if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, NAPI_POLL_WEIGHT); @@ -1508,7 +1505,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } @@ -1516,7 +1513,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) qlcnic_free_sds_rings(adapter->recv_ctx); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); } @@ -1535,7 +1532,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); qlcnic_enable_int(sds_ring); @@ -1544,8 +1541,8 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) if (qlcnic_check_multi_tx(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && !adapter->ahw->diag_test && - (adapter->max_drv_tx_rings > 1)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + (adapter->drv_tx_rings > QLCNIC_SINGLE_RING)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); qlcnic_enable_tx_intr(adapter, tx_ring); @@ -1563,7 +1560,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; - for (ring = 0; ring < 
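qlcnic_process_cmd_ring() now guards Tx reclaim with spin_trylock(&adapter->tx_clean_lock) and reports the ring as done when the lock is contended, so the poller backs off while teardown (which takes the same lock in __qlcnic_down(), later in this patch) releases the buffers. A small pthread sketch of that trylock shape, assuming POSIX spinlocks as a userspace stand-in for the kernel primitive:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t tx_clean_lock;	/* adapter->tx_clean_lock stand-in */

	/* Returns "done": when teardown already holds the lock, skip reclaim
	 * and report the ring as clean so the poller backs off -- the shape
	 * of the patched qlcnic_process_cmd_ring(). */
	static int process_cmd_ring(void)
	{
		if (pthread_spin_trylock(&tx_clean_lock) != 0)
			return 1;

		/* ... reap completions, bump tx_stats.xmit_finished ... */

		pthread_spin_unlock(&tx_clean_lock);
		return 1;	/* 0 when sw and hw consumers still differ */
	}

	int main(void)
	{
		pthread_spin_init(&tx_clean_lock, PTHREAD_PROCESS_PRIVATE);
		printf("done=%d\n", process_cmd_ring());
		pthread_spin_destroy(&tx_clean_lock);
		return 0;
	}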
adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; qlcnic_disable_int(sds_ring); napi_synchronize(&sds_ring->napi); @@ -1573,7 +1570,7 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !adapter->ahw->diag_test && qlcnic_check_multi_tx(adapter)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_disable_tx_int(adapter, tx_ring); napi_synchronize(&tx_ring->napi); @@ -1911,7 +1908,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); if (adapter->flags & QLCNIC_MSIX_ENABLED) @@ -1920,7 +1917,7 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); qlcnic_83xx_enable_tx_intr(adapter, tx_ring); @@ -1938,7 +1935,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) qlcnic_83xx_disable_intr(adapter, sds_ring); @@ -1948,7 +1945,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_83xx_disable_tx_intr(adapter, tx_ring); napi_synchronize(&tx_ring->napi); @@ -1965,10 +1962,10 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) return -ENOMEM; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) { if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) @@ -1994,7 +1991,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, qlcnic_83xx_msix_tx_poll, @@ -2012,7 +2009,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter) struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } @@ -2021,7 +2018,7 @@ void qlcnic_83xx_napi_del(struct 
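The 82xx NAPI hunks above register pollers per SDS and Tx ring from drv_sds_rings/drv_tx_rings: with multi-Tx active (and not in a diag test), every SDS ring gets a pure Rx poller and each Tx ring its own handler; otherwise the last SDS ring gets the combined Rx+Tx poller. A condensed decision sketch, with the multi-Tx condition folded into a single flag for brevity:

	#include <stdio.h>

	enum poller { RX_ONLY, RX_PLUS_TX };

	/* Which handler an SDS ring gets, following qlcnic_82xx_napi_add();
	 * multi_tx stands for "multi-Tx active, not a diag test, >1 Tx ring". */
	static enum poller sds_poller(int ring, int drv_sds_rings, int multi_tx)
	{
		if (multi_tx)
			return RX_ONLY;		/* Tx rings get their own pollers */
		if (ring == drv_sds_rings - 1)
			return RX_PLUS_TX;	/* last SDS ring also reaps Tx */
		return RX_ONLY;
	}

	int main(void)
	{
		int ring, drv_sds_rings = 4;

		for (ring = 0; ring < drv_sds_rings; ring++)
			printf("sds %d -> %s\n", ring,
			       sds_poller(ring, drv_sds_rings, 0) == RX_PLUS_TX ?
			       "rx+tx" : "rx");
		return 0;
	}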
qlcnic_adapter *adapter) if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 21d00a0449a..05c1eef8df1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -548,36 +548,75 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .io_resume = qlcnic_82xx_io_resume, }; -static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter) +static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; - int num_tx_q; - if (ahw->msix_supported && + if (qlcnic_82xx_check(adapter) && (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) { - num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS, - num_online_cpus()); - if (num_tx_q > 1) { - test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, - &adapter->state); - adapter->max_drv_tx_rings = num_tx_q; - } + test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); + return 0; } else { - adapter->max_drv_tx_rings = 1; + return 1; } } +static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt, + int queue_type) +{ + int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS; + + if (queue_type == QLCNIC_RX_QUEUE) + max_rings = adapter->max_sds_rings; + else if (queue_type == QLCNIC_TX_QUEUE) + max_rings = adapter->max_tx_rings; + + num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(), + max_rings)); + + if (ring_cnt > num_rings) + return num_rings; + else + return ring_cnt; +} + +void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt) +{ + /* 83xx adapter does not have max_tx_rings initialized in probe */ + if (adapter->max_tx_rings) + adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt, + QLCNIC_TX_QUEUE); + else + adapter->drv_tx_rings = tx_cnt; + + dev_info(&adapter->pdev->dev, "Set %d Tx rings\n", + adapter->drv_tx_rings); +} + +void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt) +{ + /* 83xx adapter does not have max_sds_rings initialized in probe */ + if (adapter->max_sds_rings) + adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt, + QLCNIC_RX_QUEUE); + else + adapter->drv_sds_rings = rx_cnt; + + dev_info(&adapter->pdev->dev, "Set %d SDS rings\n", + adapter->drv_sds_rings); +} + int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; - int max_tx_rings, max_sds_rings, tx_vector; + int drv_tx_rings, drv_sds_rings, tx_vector; int err = -1, i; if (adapter->flags & QLCNIC_TX_INTR_SHARED) { - max_tx_rings = 0; + drv_tx_rings = 0; tx_vector = 0; } else { - max_tx_rings = adapter->max_drv_tx_rings; + drv_tx_rings = adapter->drv_tx_rings; tx_vector = 1; } @@ -589,7 +628,7 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) return -ENOMEM; } - adapter->max_sds_rings = 1; + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); if (adapter->ahw->msix_supported) { @@ -602,18 +641,18 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) if (qlcnic_83xx_check(adapter)) { adapter->ahw->num_msix = num_msix; /* subtract mail box and tx ring vectors */ - adapter->max_sds_rings = num_msix - -
max_tx_rings - 1; + adapter->drv_sds_rings = num_msix - + drv_tx_rings - 1; } else { adapter->ahw->num_msix = num_msix; if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test && - (adapter->max_drv_tx_rings > 1)) - max_sds_rings = num_msix - max_tx_rings; + (adapter->drv_tx_rings > 1)) + drv_sds_rings = num_msix - drv_tx_rings; else - max_sds_rings = num_msix; + drv_sds_rings = num_msix; - adapter->max_sds_rings = max_sds_rings; + adapter->drv_sds_rings = drv_sds_rings; } dev_info(&pdev->dev, "using msi-x interrupts\n"); return err; @@ -624,13 +663,13 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) if (qlcnic_83xx_check(adapter)) { if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector)) return err; - err -= (max_tx_rings + 1); + err -= drv_tx_rings + 1; num_msix = rounddown_pow_of_two(err); - num_msix += (max_tx_rings + 1); + num_msix += drv_tx_rings + 1; } else { num_msix = rounddown_pow_of_two(err); if (qlcnic_check_multi_tx(adapter)) - num_msix += max_tx_rings; + num_msix += drv_tx_rings; } if (num_msix) { @@ -683,25 +722,14 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) return err; } -int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq) +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) { - struct qlcnic_hardware_context *ahw = adapter->ahw; int num_msix, err = 0; - if (!num_intr) - num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS; + num_msix = adapter->drv_sds_rings; - if (ahw->msix_supported) { - num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), - num_intr)); - if (qlcnic_check_multi_tx(adapter)) { - if (txq) - adapter->max_drv_tx_rings = txq; - num_msix += adapter->max_drv_tx_rings; - } - } else { - num_msix = 1; - } + if (qlcnic_check_multi_tx(adapter)) + num_msix += adapter->drv_tx_rings; err = qlcnic_enable_msix(adapter, num_msix); if (err == -ENOMEM) @@ -819,7 +847,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter) int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) { struct qlcnic_pci_info *pci_info; - int i, ret = 0, j = 0; + int i, id = 0, ret = 0, j = 0; u16 act_pci_func; u8 pfn; @@ -860,7 +888,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) continue; if (qlcnic_port_eswitch_cfg_capability(adapter)) { - if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn)) + if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn, + &id)) adapter->npars[j].eswitch_status = true; else continue; @@ -875,15 +904,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) adapter->npars[j].min_bw = pci_info[i].tx_min_bw; adapter->npars[j].max_bw = pci_info[i].tx_max_bw; + memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN); j++; } - if (qlcnic_82xx_check(adapter)) { + /* Update eSwitch status for adapters without per port eSwitch + * configuration capability + */ + if (!qlcnic_port_eswitch_cfg_capability(adapter)) { for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; - } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) { - for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) - qlcnic_enable_eswitch(adapter, i, 1); } kfree(pci_info); @@ -1131,18 +1161,25 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) if (err == -EIO) return err; adapter->ahw->extra_capability[0] = temp; + } else { + adapter->ahw->extra_capability[0] = 0; } + adapter->ahw->max_mac_filters = nic_info.max_mac_filters; adapter->ahw->max_mtu = nic_info.max_mtu; - /* Disable NPAR for 83XX */ - if (qlcnic_83xx_check(adapter)) - return 
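Ring sizing is now centralized: qlcnic_max_rings() clamps a requested count to the smaller of the hardware ceiling and the online CPU count, rounded down to a power of two, and qlcnic_enable_msix()/qlcnic_82xx_setup_intr() budget one vector per SDS ring, one per Tx ring, plus a mailbox vector on 83xx. A standalone sketch of the same arithmetic; the helper names are mine and rounddown_pow_of_two() is re-implemented for userspace:

	#include <stdio.h>

	/* Userspace stand-in for the kernel's rounddown_pow_of_two() */
	static unsigned int rounddown_pow2(unsigned int v)
	{
		unsigned int p = 1;

		while (p * 2 <= v)
			p *= 2;
		return p;
	}

	/* Clamp a requested ring count the way qlcnic_max_rings() does */
	static unsigned int clamp_rings(unsigned int requested, unsigned int hw_max,
					unsigned int online_cpus)
	{
		unsigned int limit = rounddown_pow2(hw_max < online_cpus ?
						    hw_max : online_cpus);

		return requested > limit ? limit : requested;
	}

	int main(void)
	{
		unsigned int sds = clamp_rings(8, 8, 6);	/* -> 4 */
		unsigned int tx = clamp_rings(8, 4, 6);		/* -> 4 */

		/* 83xx vector budget: one per SDS ring, one per Tx ring, +1 mailbox */
		printf("sds=%u tx=%u msix vectors=%u\n", sds, tx, sds + tx + 1);
		return 0;
	}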
err; - - if (adapter->ahw->capabilities & BIT_6) + if (adapter->ahw->capabilities & BIT_6) { adapter->flags |= QLCNIC_ESWITCH_ENABLED; - else + adapter->ahw->nic_mode = QLCNIC_VNIC_MODE; + adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; + + dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n"); + } else { + adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; + adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + } return err; } @@ -1290,6 +1327,8 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) "HAL Version: %d, Privileged function\n", adapter->ahw->fw_hal_version); } + } else { + adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; } adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; @@ -1549,7 +1588,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) if (qlcnic_82xx_check(adapter) || (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED))) { - num_sds_rings = adapter->max_sds_rings; + num_sds_rings = adapter->drv_sds_rings; for (ring = 0; ring < num_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_82xx_check(adapter) && @@ -1583,7 +1622,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) (adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED))) { handler = qlcnic_msix_tx_intr; - for (ring = 0; ring < adapter->max_drv_tx_rings; + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; snprintf(tx_ring->name, sizeof(tx_ring->name), @@ -1611,7 +1650,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter) if (qlcnic_82xx_check(adapter) || (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED))) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; free_irq(sds_ring->irq, sds_ring); } @@ -1620,7 +1659,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter) !(adapter->flags & QLCNIC_TX_INTR_SHARED)) || (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { - for (ring = 0; ring < adapter->max_drv_tx_rings; + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring->irq) @@ -1674,7 +1713,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) adapter->ahw->linkup = 0; - if (adapter->max_sds_rings > 1) + if (adapter->drv_sds_rings > 1) qlcnic_config_rss(adapter, 1); qlcnic_config_intr_coalesce(adapter); @@ -1716,6 +1755,7 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); smp_mb(); + spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); adapter->ahw->linkup = 0; netif_tx_disable(netdev); @@ -1734,8 +1774,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) qlcnic_reset_rx_buffers_list(adapter); - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) + for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); + spin_unlock(&adapter->tx_clean_lock); } /* Usage: During suspend and firmware recovery module */ @@ -1811,16 +1852,16 @@ void qlcnic_detach(struct qlcnic_adapter *adapter) adapter->is_up = 0; } -void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) +void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings) { struct qlcnic_adapter *adapter = 
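qlcnic_initialize_nic() now derives the operating mode from capability BIT_6: eSwitch-capable functions come up in vNIC mode with the smaller vNIC Tx ceiling, everything else gets the default ceiling. A toy version of that selection, using the QLCNIC_MAX_HW_* values introduced at the top of this patch (the vNIC SDS ceiling is not shown in this hunk, so it is omitted here):

	#include <stdio.h>

	#define MAX_HW_TX_RINGS		8	/* QLCNIC_MAX_HW_TX_RINGS */
	#define MAX_HW_VNIC_TX_RINGS	4	/* QLCNIC_MAX_HW_VNIC_TX_RINGS */

	/* Mode selection shaped like the qlcnic_initialize_nic() hunk:
	 * BIT_6 in the capability word flags an eSwitch-enabled function. */
	static int max_tx_rings_for(unsigned int capabilities, int *vnic_mode)
	{
		*vnic_mode = !!(capabilities & (1u << 6));
		return *vnic_mode ? MAX_HW_VNIC_TX_RINGS : MAX_HW_TX_RINGS;
	}

	int main(void)
	{
		int vnic, max_tx = max_tx_rings_for(1u << 6, &vnic);

		printf("vnic=%d max_tx_rings=%d\n", vnic, max_tx);
		return 0;
	}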
netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; - int max_tx_rings = adapter->max_drv_tx_rings; + int drv_tx_rings = adapter->drv_tx_rings; int ring; clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_disable_int(sds_ring); } @@ -1831,8 +1872,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings) qlcnic_detach(adapter); adapter->ahw->diag_test = 0; - adapter->max_sds_rings = max_sds_rings; - adapter->max_drv_tx_rings = max_tx_rings; + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; if (qlcnic_attach(adapter)) goto out; @@ -1898,10 +1939,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) qlcnic_detach(adapter); - adapter->max_sds_rings = 1; + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; + adapter->drv_tx_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; - adapter->max_drv_tx_rings = 1; ret = qlcnic_attach(adapter); if (ret) { @@ -1922,7 +1963,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test) } if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_enable_int(sds_ring); } @@ -2069,7 +2110,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, return err; } - qlcnic_dcb_init_dcbnl_ops(adapter); + qlcnic_dcb_init_dcbnl_ops(adapter->dcb); return 0; } @@ -2095,7 +2136,7 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter) int ring; struct qlcnic_host_tx_ring *tx_ring; - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring && tx_ring->cmd_buf_arr != NULL) { vfree(tx_ring->cmd_buf_arr); @@ -2113,14 +2154,14 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_cmd_buffer *cmd_buf_arr; - tx_ring = kcalloc(adapter->max_drv_tx_rings, + tx_ring = kcalloc(adapter->drv_tx_rings, sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL); if (tx_ring == NULL) return -ENOMEM; adapter->tx_ring = tx_ring; - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, ring); @@ -2135,11 +2176,11 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, if (qlcnic_83xx_check(adapter) || (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { - for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_ring->adapter = adapter; if (adapter->flags & QLCNIC_MSIX_ENABLED) { - index = adapter->max_sds_rings + ring; + index = adapter->drv_sds_rings + ring; vector = adapter->msix_entries[index].vector; tx_ring->irq = vector; } @@ -2159,22 +2200,10 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) else if (qlcnic_83xx_check(adapter)) fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; - if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) && - (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)) + if 
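qlcnic_alloc_tx_rings() maps each Tx ring to MSI-X entry drv_sds_rings + ring, i.e. the vector table is laid out as SDS vectors first, then Tx vectors (with the 83xx mailbox vector on top). A trivial sketch of that index layout:

	#include <stdio.h>

	int main(void)
	{
		unsigned int drv_sds_rings = 4, drv_tx_rings = 4, ring;

		/* Entries [0 .. sds-1] serve SDS rings; Tx rings follow them */
		for (ring = 0; ring < drv_tx_rings; ring++)
			printf("tx ring %u -> msix entry %u\n",
			       ring, drv_sds_rings + ring);
		return 0;
	}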
(ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER) qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); } -static int qlcnic_register_dcb(struct qlcnic_adapter *adapter) -{ - return __qlcnic_register_dcb(adapter); -} - -void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter) -{ - kfree(adapter->dcb); - adapter->dcb = NULL; -} - static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2183,6 +2212,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct qlcnic_hardware_context *ahw; int err, pci_using_dac = -1; char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ + struct qlcnic_dcb *dcb; if (pdev->is_virtfn) return -ENODEV; @@ -2257,7 +2287,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = qlcnic_alloc_adapter_resources(adapter); if (err) - goto err_out_free_netdev; + goto err_out_free_wq; adapter->dev_rst_time = jiffies; adapter->ahw->revision_id = pdev->revision; @@ -2269,6 +2299,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); + spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); qlcnic_register_dcb(adapter); @@ -2283,38 +2314,51 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_maintenance_mode; } - qlcnic_get_multiq_capability(adapter); - - if ((adapter->ahw->act_pci_func > 2) && - qlcnic_check_multi_tx(adapter)) { - adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS; - dev_info(&adapter->pdev->dev, - "vNIC mode enabled, Set max TX rings = %d\n", - adapter->max_drv_tx_rings); + /* compute and set default and max tx/sds rings */ + if (adapter->ahw->msix_supported) { + if (qlcnic_check_multi_tx_capability(adapter) == 1) + qlcnic_set_tx_ring_count(adapter, + QLCNIC_SINGLE_RING); + else + qlcnic_set_tx_ring_count(adapter, + QLCNIC_DEF_TX_RINGS); + qlcnic_set_sds_ring_count(adapter, + QLCNIC_DEF_SDS_RINGS); + } else { + qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); + qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); } - if (!qlcnic_check_multi_tx(adapter)) { - clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); - adapter->max_drv_tx_rings = 1; - } err = qlcnic_setup_idc_param(adapter); if (err) goto err_out_free_hw; adapter->flags |= QLCNIC_NEED_FLR; - if (adapter->dcb && qlcnic_dcb_attach(adapter)) - qlcnic_clear_dcb_ops(adapter); + dcb = adapter->dcb; + if (dcb && qlcnic_dcb_attach(dcb)) + qlcnic_clear_dcb_ops(dcb); } else if (qlcnic_83xx_check(adapter)) { - adapter->max_drv_tx_rings = 1; qlcnic_83xx_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; err = qlcnic_83xx_init(adapter, pci_using_dac); if (err) { - dev_err(&pdev->dev, "%s: failed\n", __func__); - goto err_out_free_hw; + switch (err) { + case -ENOTRECOVERABLE: + dev_err(&pdev->dev, "Adapter initialization failed due to faulty hardware. Please reboot\n"); + dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with a new one and return the faulty adapter for repair\n"); + goto err_out_free_hw; + case -ENOMEM: + dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); + goto err_out_free_hw; + default: + dev_err(&pdev->dev, "Adapter initialization failed. 
A reboot may be required to recover from this failure\n"); + dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n"); + goto err_out_maintenance_mode; + } } + if (qlcnic_sriov_vf_check(adapter)) return 0; } else { @@ -2342,7 +2386,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - err = qlcnic_setup_intr(adapter, 0, 0); + err = qlcnic_setup_intr(adapter); if (err) { dev_err(&pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; @@ -2396,6 +2440,9 @@ err_out_disable_msi: err_out_free_hw: qlcnic_free_adapter_resources(adapter); +err_out_free_wq: + destroy_workqueue(adapter->qlcnic_wq); + err_out_free_netdev: free_netdev(netdev); @@ -2409,13 +2456,20 @@ err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return err; err_out_maintenance_mode: + set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state); netdev->netdev_ops = &qlcnic_netdev_failed_ops; SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); + ahw->port_type = QLCNIC_XGBE; + + if (qlcnic_83xx_check(adapter)) + adapter->tgt_status_reg = NULL; + else + ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS; + err = register_netdev(netdev); if (err) { @@ -2446,7 +2500,7 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_cancel_idc_work(adapter); ahw = adapter->ahw; - qlcnic_dcb_free(adapter); + qlcnic_dcb_free(adapter->dcb); unregister_netdev(netdev); qlcnic_sriov_cleanup(adapter); @@ -2485,7 +2539,6 @@ static void qlcnic_remove(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); if (adapter->qlcnic_wq) { destroy_workqueue(adapter->qlcnic_wq); @@ -2538,12 +2591,11 @@ static int qlcnic_resume(struct pci_dev *pdev) static int qlcnic_open(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - u32 state; int err; - state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { - netdev_err(netdev, "%s: Device is in FAILED state\n", __func__); + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + netdev_err(netdev, "%s: Device is in non-operational state\n", + __func__); return -EIO; } @@ -2705,24 +2757,21 @@ static void qlcnic_tx_timeout(struct net_device *netdev) QLCNIC_FORCE_FW_DUMP_KEY); } else { netdev_info(netdev, "Tx timeout, reset adapter context.\n"); - if (qlcnic_82xx_check(adapter)) { - for (ring = 0; ring < adapter->max_drv_tx_rings; - ring++) { - tx_ring = &adapter->tx_ring[ring]; - dev_info(&netdev->dev, "ring=%d\n", ring); - dev_info(&netdev->dev, "crb_intr_mask=%d\n", - readl(tx_ring->crb_intr_mask)); - dev_info(&netdev->dev, "producer=%d\n", - readl(tx_ring->crb_cmd_producer)); - dev_info(&netdev->dev, "sw_consumer = %d\n", - tx_ring->sw_consumer); - dev_info(&netdev->dev, "hw_consumer = %d\n", - le32_to_cpu(*(tx_ring->hw_consumer))); - dev_info(&netdev->dev, "xmit-on=%llu\n", - tx_ring->xmit_on); - dev_info(&netdev->dev, "xmit-off=%llu\n", - tx_ring->xmit_off); - } + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netdev_info(netdev, "Tx ring=%d\n", ring); + netdev_info(netdev, + "crb_intr_mask=%d, producer=%d, sw_consumer=%d, hw_consumer=%d\n", + readl(tx_ring->crb_intr_mask), + readl(tx_ring->crb_cmd_producer), + tx_ring->sw_consumer, + 
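The probe failure path now latches a __QLCNIC_MAINTENANCE_MODE bit in adapter->state, and qlcnic_open() (plus the sysfs and reset paths later in this patch) tests that bit instead of re-reading the firmware state register. A toy model of the gate, with a plain flag word standing in for the kernel's bitops:

	#include <stdio.h>

	#define MAINTENANCE_MODE	(1ul << 0)	/* __QLCNIC_MAINTENANCE_MODE */

	static unsigned long state;	/* adapter->state stand-in */

	static int dev_open(void)
	{
		/* Same gate as the patched qlcnic_open() */
		if (state & MAINTENANCE_MODE) {
			fprintf(stderr, "device is in non-operational state\n");
			return -1;	/* -EIO in the driver */
		}
		return 0;
	}

	int main(void)
	{
		state |= MAINTENANCE_MODE;	/* probe failure path latches it */
		return dev_open() ? 1 : 0;
	}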
le32_to_cpu(*(tx_ring->hw_consumer))); + netdev_info(netdev, + "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n", + tx_ring->tx_stats.xmit_finished, + tx_ring->tx_stats.xmit_called, + tx_ring->tx_stats.xmit_on, + tx_ring->tx_stats.xmit_off); } adapter->ahw->reset_context = 1; } @@ -2836,7 +2885,7 @@ static void qlcnic_poll_controller(struct net_device *netdev) struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; disable_irq(adapter->irq); - for (ring = 0; ring < adapter->max_sds_rings; ring++) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; qlcnic_intr(adapter->irq, sds_ring); } @@ -3256,8 +3305,9 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) return; state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { - netdev_err(adapter->netdev, "%s: Device is in FAILED state\n", + + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + netdev_err(adapter->netdev, "%s: Device is in non-operational state\n", __func__); qlcnic_api_unlock(adapter); @@ -3324,7 +3374,7 @@ qlcnic_attach_work(struct work_struct *work) return; } attach: - qlcnic_dcb_get_info(adapter); + qlcnic_dcb_get_info(adapter->dcb); if (netif_running(netdev)) { if (qlcnic_up(adapter, netdev)) @@ -3349,6 +3399,8 @@ done: static int qlcnic_check_health(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; u32 state = 0, heartbeat; u32 peg_status; int err = 0; @@ -3373,7 +3425,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) if (adapter->need_fw_reset) goto detach; - if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) + if (ahw->reset_context && qlcnic_auto_fw_reset) qlcnic_reset_hw_context(adapter); return 0; @@ -3416,6 +3468,9 @@ detach: qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); QLCDB(adapter, DRV, "fw recovery scheduled.\n"); + } else if (!qlcnic_auto_fw_reset && fw_dump->enable && + adapter->flags & QLCNIC_FW_RESET_OWNER) { + qlcnic_dump_fw(adapter); } return 1; @@ -3497,7 +3552,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev) qlcnic_clr_drv_state(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; - err = qlcnic_setup_intr(adapter, 0, 0); + err = qlcnic_setup_intr(adapter); if (err) { kfree(adapter->msix_entries); @@ -3642,136 +3697,90 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) return err; } -int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq) +int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, + int queue_type) { struct net_device *netdev = adapter->netdev; - u8 max_hw = QLCNIC_MAX_TX_RINGS; - u32 max_allowed; + u8 max_hw_rings = 0; + char buf[8]; + int cur_rings; - if (!qlcnic_82xx_check(adapter)) { - netdev_err(netdev, "No Multi TX-Q support\n"); - return -EINVAL; + if (queue_type == QLCNIC_RX_QUEUE) { + max_hw_rings = adapter->max_sds_rings; + cur_rings = adapter->drv_sds_rings; + strcpy(buf, "SDS"); + } else if (queue_type == QLCNIC_TX_QUEUE) { + max_hw_rings = adapter->max_tx_rings; + cur_rings = adapter->drv_tx_rings; + strcpy(buf, "Tx"); } if (!qlcnic_use_msi_x && !qlcnic_use_msi) { - netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); + netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); return -EINVAL; } - if (!qlcnic_check_multi_tx(adapter)) { - netdev_err(netdev, "No Multi TX-Q support\n"); + if (adapter->flags & QLCNIC_MSI_ENABLED) 
{ + netdev_err(netdev, "No RSS/TSS support in MSI mode\n"); return -EINVAL; } - if (txq > QLCNIC_MAX_TX_RINGS) { - netdev_err(netdev, "Invalid ring count\n"); + if (ring_cnt < 2) { + netdev_err(netdev, + "%s rings value should not be lower than 2\n", buf); return -EINVAL; } - max_allowed = rounddown_pow_of_two(min_t(int, max_hw, - num_online_cpus())); - if ((txq > max_allowed) || !is_power_of_2(txq)) { - if (!is_power_of_2(txq)) - netdev_err(netdev, - "TX queue should be a power of 2\n"); - if (txq > num_online_cpus()) - netdev_err(netdev, - "Tx queue should not be higher than [%u], number of online CPUs in the system\n", - num_online_cpus()); - netdev_err(netdev, "Unable to configure %u Tx rings\n", txq); + if (!is_power_of_2(ring_cnt)) { + netdev_err(netdev, "%s rings value should be a power of 2\n", + buf); return -EINVAL; } - return 0; -} - -int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter, - __u32 val) -{ - struct net_device *netdev = adapter->netdev; - u8 max_hw = adapter->ahw->max_rx_ques; - u32 max_allowed; - - if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && - !qlcnic_use_msi) { - netdev_err(netdev, "No RSS support in INT-x mode\n"); - return -EINVAL; + if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) && + !qlcnic_check_multi_tx(adapter)) { + netdev_err(netdev, "No Multi Tx queue support\n"); + return -EINVAL; } - if (val > QLCNIC_MAX_SDS_RINGS) { - netdev_err(netdev, "RSS value should not be higher than %u\n", - QLCNIC_MAX_SDS_RINGS); + if (ring_cnt > num_online_cpus()) { + netdev_err(netdev, + "%s value[%u] should not be higher than, number of online CPUs\n", + buf, num_online_cpus()); return -EINVAL; } - max_allowed = rounddown_pow_of_two(min_t(int, max_hw, - num_online_cpus())); - if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) { - if (!is_power_of_2(val)) - netdev_err(netdev, "RSS value should be a power of 2\n"); - - if (val < 2) - netdev_err(netdev, "RSS value should not be lower than 2\n"); - - if (val > max_hw) - netdev_err(netdev, - "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n", - max_hw); - - if (val > num_online_cpus()) - netdev_err(netdev, - "RSS value should not be higher than[%u], number of online CPUs in the system\n", - num_online_cpus()); - - netdev_err(netdev, "Unable to configure %u RSS rings\n", val); - - return -EINVAL; - } return 0; } -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq) +int qlcnic_setup_rings(struct qlcnic_adapter *adapter, u8 rx_cnt, u8 tx_cnt) { - int err; struct net_device *netdev = adapter->netdev; - int num_msix; + int err; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; - if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && - !qlcnic_use_msi) { - netdev_err(netdev, "No RSS support in INT-x mode\n"); - return -EINVAL; - } - netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); - if (qlcnic_82xx_check(adapter)) { - if (txq != 0) - adapter->max_drv_tx_rings = txq; - - if (qlcnic_check_multi_tx(adapter) && - (txq > adapter->max_drv_tx_rings)) - num_msix = adapter->max_drv_tx_rings; - else - num_msix = data; - } - if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_enable_mbx_poll(adapter); } - netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings); - qlcnic_teardown_intr(adapter); - err = qlcnic_setup_intr(adapter, data, txq); + /* compute and set default and max tx/sds rings */ + qlcnic_set_tx_ring_count(adapter, 
tx_cnt); + qlcnic_set_sds_ring_count(adapter, rx_cnt); + + netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); + + err = qlcnic_setup_intr(adapter); if (err) { kfree(adapter->msix_entries); netdev_err(netdev, "failed to setup interrupt\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 15513608d48..7763962e2ec 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) } if (ops_index == ops_cnt) { - dev_info(&adapter->pdev->dev, - "Invalid entry type %d, exiting dump\n", + dev_info(dev, "Skipping unknown entry opcode %d\n", entry->hdr.type); - goto error; + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; } /* Collect dump for this entry */ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); - if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump)) + if (!qlcnic_valid_dump_entry(dev, entry, dump)) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; + } + buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; } - if (dump_size != buf_offset) { - dev_info(&adapter->pdev->dev, - "Captured(%d) and expected size(%d) do not match\n", - buf_offset, dump_size); - goto error; - } else { - fw_dump->clr = 1; - snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", - adapter->netdev->name); - dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n", - adapter->netdev->name, fw_dump->size); - /* Send a udev event to notify availability of FW dump */ - kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg); - return 0; - } -error: + + fw_dump->clr = 1; + snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); + dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n", + adapter->netdev->name, fw_dump->size, tmpl_hdr->size); + /* Send a udev event to notify availability of FW dump */ + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); + if (fw_dump->use_pex_dma) dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE, fw_dump->dma_buffer, fw_dump->phys_addr); - vfree(fw_dump->data); - return -EINVAL; + + return 0; } void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 392b9bd12b4..21a4b274d2e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, int pci_using_dac) { + struct qlcnic_dcb *dcb; int err; INIT_LIST_HEAD(&adapter->vf_mc_list); @@ -507,7 +508,11 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, dev_warn(&adapter->pdev->dev, "Device does not support MSI interrupts\n"); - err = qlcnic_setup_intr(adapter, 1, 0); + /* compute and set default and max tx/sds rings */ + qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); + qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); + + err = qlcnic_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; @@ -533,8 +538,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, if 
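The qlcnic_dump_fw() hunk above stops aborting on an unrecognized template entry: the entry is flagged QLCNIC_DUMP_SKIP and the walk advances by the entry's own offset, so the rest of the capture still completes. A simplified sketch of that skip-and-continue walk over an in-memory template (the real code steps through a firmware-provided blob):

	#include <stdint.h>
	#include <stdio.h>

	#define DUMP_SKIP 0x80	/* stands in for QLCNIC_DUMP_SKIP */

	struct dump_entry {
		uint32_t type;		/* capture opcode */
		uint32_t offset;	/* distance to the next entry */
		uint32_t flags;
	};

	static int known_type(uint32_t t)
	{
		return t < 100;		/* toy notion of "a handler exists" */
	}

	/* Skip-and-continue walk, as in the patched qlcnic_dump_fw() */
	static void walk_template(struct dump_entry *e, int n)
	{
		uint32_t entry_offset = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (!known_type(e[i].type)) {
				printf("skipping unknown entry opcode %u\n",
				       (unsigned)e[i].type);
				e[i].flags |= DUMP_SKIP;
				entry_offset += e[i].offset;
				continue;
			}
			/* ... collect this entry's data into the dump buffer ... */
			entry_offset += e[i].offset;
		}
		printf("walked %u bytes of template\n", (unsigned)entry_offset);
	}

	int main(void)
	{
		struct dump_entry tmpl[] = {
			{ 1, 16, 0 }, { 255, 8, 0 }, { 2, 16, 0 },
		};

		walk_template(tmpl, 3);
		return 0;
	}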
(err) goto err_out_send_channel_term; - if (adapter->dcb && qlcnic_dcb_attach(adapter)) - qlcnic_clear_dcb_ops(adapter); + dcb = adapter->dcb; + + if (dcb && qlcnic_dcb_attach(dcb)) + qlcnic_clear_dcb_ops(dcb); err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); if (err) @@ -1577,7 +1584,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) if (err) goto err_out_term_channel; - qlcnic_dcb_get_info(adapter); + qlcnic_dcb_get_info(adapter->dcb); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 019f4377307..1a9f8a400e5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -156,7 +156,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, const char *buf, size_t len) { struct qlcnic_hardware_context *ahw = adapter->ahw; - int err, max_sds_rings = adapter->max_sds_rings; + int err, drv_sds_rings = adapter->drv_sds_rings; u16 beacon; u8 h_beacon_state, b_state, b_rate; @@ -211,7 +211,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) - qlcnic_diag_free_res(adapter->netdev, max_sds_rings); + qlcnic_diag_free_res(adapter->netdev, drv_sds_rings); out: if (!ahw->beacon_state) @@ -1272,7 +1272,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 state; if (device_create_bin_file(dev, &bin_attr_port_stats)) dev_info(dev, "failed to create port stats sysfs entry"); @@ -1286,8 +1285,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); - state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) return; if (device_create_bin_file(dev, &bin_attr_pci_config)) @@ -1313,7 +1311,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 state; device_remove_bin_file(dev, &bin_attr_port_stats); @@ -1323,8 +1320,7 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); - state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); - if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) return; device_remove_bin_file(dev, &bin_attr_pci_config); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 89943377846..03517478e58 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.32" +#define DRV_VERSION "1.00.00.34" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ @@ -2206,14 +2206,14 @@ extern char qlge_driver_name[]; extern const char qlge_driver_version[]; extern const struct ethtool_ops qlge_ethtool_ops; -extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); -extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); -extern 
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value); -extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); -extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id); +int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); +void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); +int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value); +int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); +int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id); void ql_queue_fw_error(struct ql_adapter *qdev); void ql_mpi_work(struct work_struct *work); void ql_mpi_reset_work(struct work_struct *work); @@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev); int ql_pause_mpi_risc(struct ql_adapter *qdev); int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, - u32 ram_addr, int word_count); -int ql_core_dump(struct ql_adapter *qdev, - struct ql_mpi_coredump *mpi_coredump); +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, + int word_count); +int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump); int ql_mb_about_fw(struct ql_adapter *qdev); int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); @@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev); int ql_mb_set_port_cfg(struct ql_adapter *qdev); int ql_wait_fifo_empty(struct ql_adapter *qdev); void ql_get_dump(struct ql_adapter *qdev, void *buff); -void ql_gen_reg_dump(struct ql_adapter *qdev, - struct ql_reg_dump *mpi_coredump); +void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump); netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); int ql_own_firmware(struct ql_adapter *qdev); @@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); /* #define QL_OB_DUMP */ #ifdef QL_REG_DUMP -extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); -extern void ql_dump_routing_entries(struct ql_adapter *qdev); -extern void ql_dump_regs(struct ql_adapter *qdev); +void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); +void ql_dump_routing_entries(struct ql_adapter *qdev); +void ql_dump_regs(struct ql_adapter *qdev); #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) @@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev); #endif #ifdef QL_STAT_DUMP -extern void ql_dump_stat(struct ql_adapter *qdev); +void ql_dump_stat(struct ql_adapter *qdev); #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) #else #define QL_DUMP_STAT(qdev) #endif #ifdef QL_DEV_DUMP -extern void ql_dump_qdev(struct ql_adapter *qdev); +void ql_dump_qdev(struct ql_adapter *qdev); #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) #else #define QL_DUMP_QDEV(qdev) #endif #ifdef QL_CB_DUMP -extern void ql_dump_wqicb(struct wqicb *wqicb); -extern void ql_dump_tx_ring(struct tx_ring *tx_ring); -extern void ql_dump_ricb(struct ricb *ricb); -extern void 
ql_dump_cqicb(struct cqicb *cqicb); -extern void ql_dump_rx_ring(struct rx_ring *rx_ring); -extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); +void ql_dump_wqicb(struct wqicb *wqicb); +void ql_dump_tx_ring(struct tx_ring *tx_ring); +void ql_dump_ricb(struct ricb *ricb); +void ql_dump_cqicb(struct cqicb *cqicb); +void ql_dump_rx_ring(struct rx_ring *rx_ring); +void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) @@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); #endif #ifdef QL_OB_DUMP -extern void ql_dump_tx_desc(struct tx_buf_desc *tbd); -extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); -extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); +void ql_dump_tx_desc(struct tx_buf_desc *tbd); +void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); +void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) #else @@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); #endif #ifdef QL_IB_DUMP -extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); +void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) #else #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) #endif #ifdef QL_ALL_DUMP -extern void ql_dump_all(struct ql_adapter *qdev); +void ql_dump_all(struct ql_adapter *qdev); #define QL_DUMP_ALL(qdev) ql_dump_all(qdev) #else #define QL_DUMP_ALL(qdev) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 0780e039b27..8dee1beb985 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { }; #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) +#define QLGE_RCV_MAC_ERR_STATS 7 static int ql_update_ring_coalescing(struct ql_adapter *qdev) { @@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev) iter++; } + /* Update receive mac error statistics */ + iter += QLGE_RCV_MAC_ERR_STATS; + /* * Get Per-priority TX pause frame counter statistics. 
*/ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 2553cf4503b..449f506d2e8 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); -static int ql_wol(struct ql_adapter *qdev); -static void qlge_set_multicast_list(struct net_device *ndev); +static int ql_wol(struct ql_adapter *); +static void qlge_set_multicast_list(struct net_device *); +static int ql_adapter_down(struct ql_adapter *); +static int ql_adapter_up(struct ql_adapter *); /* This hardware semaphore causes exclusive access to * resources shared between the NIC driver, MPI firmware, @@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, } } +/** + * ql_update_mac_hdr_len - helper routine to update the mac header length + * based on vlan tags if present + */ +static void ql_update_mac_hdr_len(struct ql_adapter *qdev, + struct ib_mac_iocb_rsp *ib_mac_rsp, + void *page, size_t *len) +{ + u16 *tags; + + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + return; + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) { + tags = (u16 *)page; + /* Look for stacked vlan tags in ethertype field */ + if (tags[6] == ETH_P_8021Q && + tags[8] == ETH_P_8021Q) + *len += 2 * VLAN_HLEN; + else + *len += VLAN_HLEN; + } +} + /* Process an inbound completion from an rx ring. */ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct rx_ring *rx_ring, @@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, void *addr; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; + size_t hlen = ETH_HLEN; skb = netdev_alloc_skb(ndev, length); if (!skb) { @@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, goto err_out; } + /* Update the MAC header length */ + ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); + /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ - if (skb->len > ndev->mtu + ETH_HLEN) { + if (skb->len > ndev->mtu + hlen) { netif_err(qdev, drv, qdev->ndev, "Segment too small, dropping.\n"); rx_ring->rx_dropped++; goto err_out; } - memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); + memcpy(skb_put(skb, hlen), addr, hlen); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset+ETH_HLEN, - length-ETH_HLEN); - skb->len += length-ETH_HLEN; - skb->data_len += length-ETH_HLEN; - skb->truesize += length-ETH_HLEN; + lbq_desc->p.pg_chunk.offset + hlen, + length - hlen); + skb->len += length - hlen; + skb->data_len += length - hlen; + skb->truesize += length - hlen; rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; @@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. 
*/ struct iphdr *iph = - (struct iphdr *) ((u8 *)addr + ETH_HLEN); + (struct iphdr *)((u8 *)addr + hlen); if (!(iph->frag_off & htons(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, struct bq_desc *sbq_desc; struct sk_buff *skb = NULL; u32 length = le32_to_cpu(ib_mac_rsp->data_len); - u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); + u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); + size_t hlen = ETH_HLEN; /* * Handle the header buffer if present. @@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb->data_len += length; skb->truesize += length; length -= length; - __pskb_pull_tail(skb, - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); + ql_update_mac_hdr_len(qdev, ib_mac_rsp, + lbq_desc->p.pg_chunk.va, + &hlen); + __pskb_pull_tail(skb, hlen); } } else { /* @@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, length -= size; i++; } - __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? - VLAN_ETH_HLEN : ETH_HLEN); + ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, + &hlen); + __pskb_pull_tail(skb, hlen); } return skb; } @@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb_record_rx_queue(skb, rx_ring->cq_id); - if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) + if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); @@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp) { u32 length = le32_to_cpu(ib_mac_rsp->data_len); - u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? + u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && + (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? ((le16_to_cpu(ib_mac_rsp->vlan_id) & IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; @@ -2310,17 +2343,44 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) } } +/** + * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter + * based on the features to enable/disable hardware vlan accel + */ +static int qlge_update_hw_vlan_features(struct net_device *ndev, + netdev_features_t features) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status = 0; + + status = ql_adapter_down(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring down the adapter\n"); + return status; + } + + /* update the features with recent change */ + ndev->features = features; + + status = ql_adapter_up(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring up the adapter\n"); + return status; + } + return status; +} + static netdev_features_t qlge_fix_features(struct net_device *ndev, netdev_features_t features) { - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. 
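ql_update_mac_hdr_len() (added a few hunks up) inspects the 16-bit words at offsets 6 and 8 of the received frame, where the outer and inner ethertypes sit, and grows the expected MAC header by one VLAN_HLEN for a single tag or two for stacked (QinQ) tags whenever hardware stripping is off. A userspace sketch of that probe; byte-order conversion is elided, matching the comparison as written in the hunk:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_HLEN	14
	#define VLAN_HLEN	4
	#define ETH_P_8021Q	0x8100

	/* tags[6] and tags[8] are the 16-bit words holding the outer and
	 * inner ethertype; compare them as the hunk does. */
	static size_t mac_hdr_len(const uint16_t *tags, int frame_tagged,
				  int hw_vlan_strip)
	{
		size_t len = ETH_HLEN;

		if (hw_vlan_strip || !frame_tagged)
			return len;
		if (tags[6] == ETH_P_8021Q && tags[8] == ETH_P_8021Q)
			len += 2 * VLAN_HLEN;	/* stacked (QinQ) tags */
		else
			len += VLAN_HLEN;	/* single tag */
		return len;
	}

	int main(void)
	{
		uint16_t hdr[10] = { 0 };

		hdr[6] = ETH_P_8021Q;	/* outer tag only */
		printf("single tag: %zu\n", mac_hdr_len(hdr, 1, 0));	/* 18 */

		hdr[8] = ETH_P_8021Q;	/* inner tag as well */
		printf("stacked:    %zu\n", mac_hdr_len(hdr, 1, 0));	/* 22 */
		return 0;
	}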
- */ - if (features & NETIF_F_HW_VLAN_CTAG_RX) - features |= NETIF_F_HW_VLAN_CTAG_TX; - else - features &= ~NETIF_F_HW_VLAN_CTAG_TX; + int err; + + /* Update the behavior of vlan accel in the adapter */ + err = qlge_update_hw_vlan_features(ndev, features); + if (err) + return err; return features; } @@ -3704,8 +3764,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) ql_write32(qdev, SYS, mask | value); /* Set the default queue, and VLAN behavior. */ - value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; - mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); + value = NIC_RCV_CFG_DFQ; + mask = NIC_RCV_CFG_DFQ_MASK; + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { + value |= NIC_RCV_CFG_RV; + mask |= (NIC_RCV_CFG_RV << 16); + } ql_write32(qdev, NIC_RCV_CFG, (mask | value)); /* Set the MPI interrupt to enabled. */ @@ -4505,7 +4569,6 @@ static void ql_release_all(struct pci_dev *pdev) iounmap(qdev->doorbell_area); vfree(qdev->mpi_coredump); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); } static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, @@ -4692,11 +4755,15 @@ static int qlge_probe(struct pci_dev *pdev, qdev = netdev_priv(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO | NETIF_F_TSO_ECN | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM; - ndev->features = ndev->hw_features | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM; + ndev->features = ndev->hw_features; ndev->vlan_features = ndev->hw_features; if (test_bit(QL_DMA64, &qdev->flags)) diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index e9dc84943cf..1e49ec5b223 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1231,7 +1231,6 @@ err_out_mdio: mdiobus_free(lp->mii_bus); err_out_unmap: netif_napi_del(&lp->napi); - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions(pdev); @@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); free_netdev(dev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index d2e591955bd..737c1a881f7 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp) le32_to_cpu(txd->opts1) & 0xffff, PCI_DMA_TODEVICE); - bytes_compl += skb->len; - pkts_compl++; - if (status & LastFrag) { if (status & (TxError | TxFIFOUnder)) { netif_dbg(cp, tx_err, cp->dev, @@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp) netif_dbg(cp, tx_done, cp->dev, "tx done, slot %d\n", tx_tail); } + bytes_compl += skb->len; + pkts_compl++; dev_kfree_skb_irq(skb); } @@ -2052,7 +2051,6 @@ static void cp_remove_one (struct pci_dev *pdev) pci_release_regions(pdev); pci_clear_mwi(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); free_netdev(dev); } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 3ccedeb8aba..da5972eefdd 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev) pci_release_regions (pdev); 
free_netdev(dev); - pci_set_drvdata (pdev, NULL); } @@ -791,6 +790,9 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev) pci_set_master (pdev); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + retry: /* PIO bar register comes first. */ bar = !use_io; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3397cee8977..c737f0ea5de 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x14, 0x9065); rtl_writephy(tp, 0x14, 0x1065); + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + rtl_writephy(tp, 0x1f, 0x0000); } @@ -6811,7 +6816,6 @@ static void rtl_remove_one(struct pci_dev *pdev) rtl_disable_msi(pdev, tp); rtl8169_release_board(pdev, dev, tp->mmio_addr); - pci_set_drvdata(pdev, NULL); } static const struct net_device_ops rtl_netdev_ops = { diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5cd831ebfa8..d256ce19d4d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = { .register_type = SH_ETH_REG_FAST_SH4, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, - .rmcr_value = 0x00000001, + .rmcr_value = RMCR_RNC, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .fdr_value = 0x0000072f, - .rmcr_value = 0x00000001, + .rmcr_value = RMCR_RNC, .irq_flags = IRQF_SHARED, .apr = 1, @@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + .rmcr_value = RMCR_RNC, .apr = 1, .mpr = 1, .tpauser = 1, .bculr = 1, .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .tsu = 1, @@ -868,7 +872,7 @@ static void update_mac_address(struct net_device *ndev) static void read_mac_address(struct net_device *ndev, unsigned char *mac) { if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { - memcpy(ndev->dev_addr, mac, 6); + memcpy(ndev->dev_addr, mac, ETH_ALEN); } else { ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; @@ -2659,6 +2663,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); + if (!pd) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -EINVAL; + goto out_release; + } + /* get PHY ID */ mdp->phy_id = pd->phy; mdp->phy_interface = pd->phy_interface; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a0db02c63b1..f32c1692d31 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -321,6 +321,9 @@ enum TD_STS_BIT { #define TD_TFP (TD_TFP1|TD_TFP0) /* RMCR */ +enum RMCR_BIT { + RMCR_RNC = 0x00000001, +}; #define DEFAULT_RMCR_VALUE 0x00000000 /* ECMR */ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 9f18ae984f9..676c3c057bf 100644 
--- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx) return rc; } +#ifdef EFX_USE_PIO + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); + unsigned int i; + int rc; + + BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); + + for (i = 0; i < nic_data->n_piobufs; i++) { + MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[i]); + rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), + NULL, 0, NULL); + WARN_ON(rc); + } + + nic_data->n_piobufs = 0; +} + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); + unsigned int i; + size_t outlen; + int rc = 0; + + BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); + + for (i = 0; i < n; i++) { + rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + break; + if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { + rc = -EIO; + break; + } + nic_data->piobuf_handle[i] = + MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); + netif_dbg(efx, probe, efx->net_dev, + "allocated PIO buffer %u handle %x\n", i, + nic_data->piobuf_handle[i]); + } + + nic_data->n_piobufs = i; + if (rc) + efx_ef10_free_piobufs(efx); + return rc; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, + max(MC_CMD_LINK_PIOBUF_IN_LEN, + MC_CMD_UNLINK_PIOBUF_IN_LEN)); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + unsigned int offset, index; + int rc; + + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); + BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); + + /* Link a buffer to each VI in the write-combining mapping */ + for (index = 0; index < nic_data->n_piobufs; ++index) { + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + nic_data->pio_write_vi_base + index, index, + rc); + goto fail; + } + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u\n", + nic_data->pio_write_vi_base + index, index); + } + + /* Link a buffer to each TX queue */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + /* We assign the PIO buffers to queues in + * reverse order to allow for the following + * special case. + */ + offset = ((efx->tx_channel_offset + efx->n_tx_channels - + tx_queue->channel->channel - 1) * + efx_piobuf_size); + index = offset / ER_DZ_TX_PIOBUF_SIZE; + offset = offset % ER_DZ_TX_PIOBUF_SIZE; + + /* When the host page size is 4K, the first + * host page in the WC mapping may be within + * the same VI page as the last TX queue. We + * can only link one buffer to each VI. 
+ */ + if (tx_queue->queue == nic_data->pio_write_vi_base) { + BUG_ON(index != 0); + rc = 0; + } else { + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_TXQ_INSTANCE, + tx_queue->queue); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + + if (rc) { + /* This is non-fatal; the TX path just + * won't use PIO for this queue + */ + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + tx_queue->queue, index, rc); + tx_queue->piobuf = NULL; + } else { + tx_queue->piobuf = + nic_data->pio_write_base + + index * EFX_VI_PAGE_SIZE + offset; + tx_queue->piobuf_offset = offset; + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u offset %x addr %p\n", + tx_queue->queue, index, + tx_queue->piobuf_offset, + tx_queue->piobuf); + } + } + } + + return 0; + +fail: + while (index--) { + MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, + inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + return rc; +} + +#else /* !EFX_USE_PIO */ + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + return n == 0 ? 0 : -ENOBUFS; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + return 0; +} + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ +} + +#endif /* EFX_USE_PIO */ + static void efx_ef10_remove(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx) /* This needs to be after efx_ptp_remove_channel() with no filters */ efx_ef10_rx_free_indir_table(efx); + if (nic_data->wc_membase) + iounmap(nic_data->wc_membase); + rc = efx_ef10_free_vis(efx); WARN_ON(rc != 0); + if (!nic_data->must_restore_piobufs) + efx_ef10_free_piobufs(efx); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); @@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx, return 0; } +/* Note that the failure path of this function does not free + * resources, as this will be done by efx_ef10_remove(). + */ static int efx_ef10_dimension_resources(struct efx_nic *efx) { - unsigned int n_vis = - max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int uc_mem_map_size, wc_mem_map_size; + unsigned int min_vis, pio_write_vi_base, max_vis; + void __iomem *membase; + int rc; + + min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + +#ifdef EFX_USE_PIO + /* Try to allocate PIO buffers if wanted and if the full + * number of PIO buffers would be sufficient to allocate one + * copy-buffer per TX channel. Failure is non-fatal, as there + * are only a small number of PIO buffers shared between all + * functions of the controller. 
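To make the reverse-order assignment in efx_ef10_link_piobufs() concrete: each TX queue's copy buffer is carved out of a shared PIO buffer, and the VI index plus byte offset fall out of simple division. A runnable model, assuming EF10's 2 KB PIO buffers (ER_DZ_TX_PIOBUF_SIZE) and the driver's default 256-byte efx_piobuf_size:

#include <stdio.h>

/* Assumed sizes for illustration: with 2 KB PIO buffers and 256-byte
 * copy buffers, eight queues share one PIO buffer. */
#define PIOBUF_SIZE  2048u
#define COPY_BUF     256u

int main(void)
{
	unsigned int n_tx_channels = 10, tx_channel_offset = 0, channel;

	for (channel = 0; channel < n_tx_channels; channel++) {
		/* Reverse order, as in efx_ef10_link_piobufs() */
		unsigned int offset = (tx_channel_offset + n_tx_channels -
				       channel - 1) * COPY_BUF;
		unsigned int index = offset / PIOBUF_SIZE;

		offset %= PIOBUF_SIZE;
		printf("channel %2u -> PIO buffer %u, offset %#x\n",
		       channel, index, offset);
	}
	return 0;
}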
+ */ + if (efx_piobuf_size != 0 && + ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + efx->n_tx_channels) { + unsigned int n_piobufs = + DIV_ROUND_UP(efx->n_tx_channels, + ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + + rc = efx_ef10_alloc_piobufs(efx, n_piobufs); + if (rc) + netif_err(efx, probe, efx->net_dev, + "failed to allocate PIO buffers (%d)\n", rc); + else + netif_dbg(efx, probe, efx->net_dev, + "allocated %u PIO buffers\n", n_piobufs); + } +#else + nic_data->n_piobufs = 0; +#endif + + /* PIO buffers should be mapped with write-combining enabled, + * and we want to make single UC and WC mappings rather than + * several of each (in fact that's the only option if host + * page size is >4K). So we may allocate some extra VIs just + * for writing PIO buffers through. + */ + uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + + ER_DZ_TX_PIOBUF); + if (nic_data->n_piobufs) { + pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; + wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + + nic_data->n_piobufs) * + EFX_VI_PAGE_SIZE) - + uc_mem_map_size); + max_vis = pio_write_vi_base + nic_data->n_piobufs; + } else { + pio_write_vi_base = 0; + wc_mem_map_size = 0; + max_vis = min_vis; + } + + /* In case the last attached driver failed to free VIs, do it now */ + rc = efx_ef10_free_vis(efx); + if (rc != 0) + return rc; + + rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); + if (rc != 0) + return rc; + + /* If we didn't get enough VIs to map all the PIO buffers, free the + * PIO buffers + */ + if (nic_data->n_piobufs && + nic_data->n_allocated_vis < + pio_write_vi_base + nic_data->n_piobufs) { + netif_dbg(efx, probe, efx->net_dev, + "%u VIs are not sufficient to map %u PIO buffers\n", + nic_data->n_allocated_vis, nic_data->n_piobufs); + efx_ef10_free_piobufs(efx); + } + + /* Shrink the original UC mapping of the memory BAR */ + membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); + if (!membase) { + netif_err(efx, probe, efx->net_dev, + "could not shrink memory BAR to %x\n", + uc_mem_map_size); + return -ENOMEM; + } + iounmap(efx->membase); + efx->membase = membase; + + /* Set up the WC mapping if needed */ + if (wc_mem_map_size) { + nic_data->wc_membase = ioremap_wc(efx->membase_phys + + uc_mem_map_size, + wc_mem_map_size); + if (!nic_data->wc_membase) { + netif_err(efx, probe, efx->net_dev, + "could not allocate WC mapping of size %x\n", + wc_mem_map_size); + return -ENOMEM; + } + nic_data->pio_write_vi_base = pio_write_vi_base; + nic_data->pio_write_base = + nic_data->wc_membase + + (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - + uc_mem_map_size); - return efx_ef10_alloc_vis(efx, n_vis, n_vis); + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + } + + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", + &efx->membase_phys, efx->membase, uc_mem_map_size, + nic_data->wc_membase, wc_mem_map_size); + + return 0; } static int efx_ef10_init_nic(struct efx_nic *efx) @@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx) nic_data->must_realloc_vis = false; } + if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { + rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); + if (rc == 0) { + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + } + + /* Log an error on failure, but this is non-fatal */ + if (rc) + netif_err(efx, drv, efx->net_dev, + "failed to restore PIO buffers (%d)\n", rc); + nic_data->must_restore_piobufs = false; + } + 
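The mapping arithmetic in efx_ef10_dimension_resources(), worked through once: the UC mapping is shrunk to cover only the ordinary VIs (up to the PIO doorbell offset within the last VI page), and the WC mapping picks up from there for the PIO write VIs. A standalone model with assumed constants; the 8 KB EFX_VI_PAGE_SIZE matches EF10, but the ER_DZ_TX_PIOBUF offset used here is purely illustrative:

#include <stdio.h>

#define PAGE_SZ       4096u           /* 4 KB host page */
#define VI_PAGE_SIZE  8192u           /* EFX_VI_PAGE_SIZE on EF10 */
#define TX_PIOBUF_OFS 0x0a00u         /* assumed doorbell offset */
#define PAGE_ALIGN_UP(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
	unsigned int min_vis = 8, n_piobufs = 2;
	unsigned int uc = PAGE_ALIGN_UP((min_vis - 1) * VI_PAGE_SIZE +
					TX_PIOBUF_OFS);
	unsigned int base = uc / VI_PAGE_SIZE;       /* pio_write_vi_base */
	unsigned int wc = PAGE_ALIGN_UP((base + n_piobufs) * VI_PAGE_SIZE) - uc;

	printf("UC mapping %#x bytes, PIO write VIs start at VI %u\n", uc, base);
	printf("WC mapping %#x bytes, max_vis = %u\n", wc, base + n_piobufs);
	if (base == min_vis - 1)
		printf("VI %u overlaps the last TX VI: the special case "
		       "handled in efx_ef10_link_piobufs()\n", base);
	return 0;
}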
efx_ef10_rx_push_indir_table(efx); return 0; } @@ -444,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), + EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), }; #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ @@ -498,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ (1ULL << EF10_STAT_rx_length_error)) -#if BITS_PER_LONG == 64 -#define STAT_MASK_BITMAP(bits) (bits) -#else -#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 -#endif - -static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) -{ - static const unsigned long hunt_40g_stat_mask[] = { - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | - HUNT_40G_EXTRA_STAT_MASK) - }; - static const unsigned long hunt_10g_only_stat_mask[] = { - STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | - HUNT_10G_ONLY_STAT_MASK) - }; +/* These statistics are only provided if the firmware supports the + * capability PM_AND_RXDP_COUNTERS. 
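A note on the statistics-mask rework just below: the raw mask is built as a u64, but efx_nic_describe_stats()/efx_nic_update_stats() take an unsigned long bitmap, so on 32-bit builds the mask must be split across two words. A runnable equivalent of efx_ef10_get_stat_mask(), using the compiler's __SIZEOF_LONG__ as a userspace stand-in for the kernel's BITS_PER_LONG:

#include <stdio.h>

static void get_stat_mask(unsigned long long raw_mask, unsigned long *mask)
{
#if __SIZEOF_LONG__ == 8
	mask[0] = (unsigned long)raw_mask;
#else
	mask[0] = (unsigned long)(raw_mask & 0xffffffff);
	mask[1] = (unsigned long)(raw_mask >> 32);
#endif
}

int main(void)
{
	unsigned long mask[2] = { 0, 0 };

	get_stat_mask((1ULL << 40) | 1ULL, mask);  /* bits 0 and 40 set */
	printf("mask[0] = %#lx, mask[1] = %#lx\n", mask[0], mask[1]);
	return 0;
}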
+ */ +#define HUNT_PM_AND_RXDP_STAT_MASK ( \ + (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ + (1ULL << EF10_STAT_rx_dp_emerg_wait)) + +static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) +{ + u64 raw_mask = HUNT_COMMON_STAT_MASK; u32 port_caps = efx_mcdi_phy_get_caps(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - return hunt_40g_stat_mask; + raw_mask |= HUNT_40G_EXTRA_STAT_MASK; else - return hunt_10g_only_stat_mask; + raw_mask |= HUNT_10G_ONLY_STAT_MASK; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) + raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; + + return raw_mask; +} + +static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) +{ + u64 raw_mask = efx_ef10_raw_stat_mask(efx); + +#if BITS_PER_LONG == 64 + mask[0] = raw_mask; +#else + mask[0] = raw_mask & 0xffffffff; + mask[1] = raw_mask >> 32; +#endif } static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) { + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + + efx_ef10_get_stat_mask(efx, mask); return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, - efx_ef10_stat_mask(efx), names); + mask, names); } static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - const unsigned long *stats_mask = efx_ef10_stat_mask(efx); + DECLARE_BITMAP(mask, EF10_STAT_COUNT); __le64 generation_start, generation_end; u64 *stats = nic_data->stats; __le64 *dma_stats; + efx_ef10_get_stat_mask(efx, mask); + dma_stats = efx->stats_buffer.addr; nic_data = efx->nic_data; @@ -543,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); - efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, stats, efx->stats_buffer.addr, false); + rmb(); generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; if (generation_end != generation_start) return -EAGAIN; @@ -563,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats) { - const unsigned long *mask = efx_ef10_stat_mask(efx); + DECLARE_BITMAP(mask, EF10_STAT_COUNT); struct efx_ef10_nic_data *nic_data = efx->nic_data; u64 *stats = nic_data->stats; size_t stats_count = 0, index; int retry; + efx_ef10_get_stat_mask(efx, mask); + /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ @@ -716,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) /* All our allocations have been reset */ nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; + nic_data->must_restore_piobufs = true; nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; /* The datapath firmware might have been changed */ @@ -2137,7 +2491,7 @@ out_unlock: return rc; } -void 
efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) +static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) { /* no need to do anything here on EF10 */ } diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h index b3f4e3755fd..207ac9a1e3d 100644 --- a/drivers/net/ethernet/sfc/ef10_regs.h +++ b/drivers/net/ethernet/sfc/ef10_regs.h @@ -315,6 +315,7 @@ #define ESF_DZ_TX_PIO_TYPE_WIDTH 1 #define ESF_DZ_TX_PIO_OPT_LBN 60 #define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_PIO 1 #define ESF_DZ_TX_PIO_CONT_LBN 59 #define ESF_DZ_TX_PIO_CONT_WIDTH 1 #define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 07c9bc4c61b..fd844b53e38 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - NET_IP_ALIGN + efx->rx_dma_len); + efx->rx_ip_align + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = efx->type->always_rx_scatter; efx->rx_buffer_order = 0; @@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx) WARN_ON(channel->rx_pkt_n_frags); } + efx_ptp_start_datapath(efx); + if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); } @@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx) EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); + efx_ptp_stop_datapath(efx); + /* Stop RX refill */ efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) @@ -1121,7 +1125,7 @@ static int efx_init_io(struct efx_nic *efx) */ while (dma_mask > 0x7fffffffUL) { if (dma_supported(&pci_dev->dev, dma_mask)) { - rc = dma_set_mask(&pci_dev->dev, dma_mask); + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); if (rc == 0) break; } @@ -1134,16 +1138,6 @@ static int efx_init_io(struct efx_nic *efx) } netif_dbg(efx, probe, efx->net_dev, "using DMA mask %llx\n", (unsigned long long) dma_mask); - rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask); - if (rc) { - /* dma_set_coherent_mask() is not *allowed* to - * fail with a mask that dma_set_mask() accepted, - * but just in case... - */ - netif_err(efx, probe, efx->net_dev, - "failed to set consistent DMA mask\n"); - goto fail2; - } efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); @@ -2550,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx, efx->net_dev = net_dev; efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? 
(efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; efx->rx_packet_hash_offset = efx->type->rx_hash_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 34d00f5771f..b8235ee5d7d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -18,37 +18,36 @@ #define EFX_MEM_BAR 2 /* TX */ -extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); -extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); -extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); -extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); -extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); -extern netdev_tx_t -efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); -extern netdev_tx_t -efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); -extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); -extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); +void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); +void efx_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); +extern unsigned int efx_piobuf_size; /* RX */ -extern void efx_rx_config_page_split(struct efx_nic *efx); -extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); -extern void efx_rx_slow_fill(unsigned long context); -extern void __efx_rx_packet(struct efx_channel *channel); -extern void efx_rx_packet(struct efx_rx_queue *rx_queue, - unsigned int index, unsigned int n_frags, - unsigned int len, u16 flags); +void efx_rx_config_page_split(struct efx_nic *efx); +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); +void efx_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); +void efx_rx_slow_fill(unsigned long context); +void __efx_rx_packet(struct efx_channel *channel); +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); static inline void efx_rx_flush_packet(struct efx_channel *channel) { if (channel->rx_pkt_n_frags) __efx_rx_packet(channel); } -extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_DMAQ_SIZE 4096UL #define EFX_DEFAULT_DMAQ_SIZE 1024UL @@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, return efx->type->filter_get_rx_ids(efx, priority, buf, size); } #ifdef CONFIG_RFS_ACCEL -extern int efx_filter_rfs(struct 
net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id); -extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); static inline void efx_filter_rfs_expire(struct efx_channel *channel) { if (channel->rfs_filters_added >= 60 && @@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} #define efx_filter_rfs_enabled() 0 #endif -extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); /* Channels */ -extern int efx_channel_dummy_op_int(struct efx_channel *channel); -extern void efx_channel_dummy_op_void(struct efx_channel *channel); -extern int -efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); +int efx_channel_dummy_op_int(struct efx_channel *channel); +void efx_channel_dummy_op_void(struct efx_channel *channel); +int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); /* Ports */ -extern int efx_reconfigure_port(struct efx_nic *efx); -extern int __efx_reconfigure_port(struct efx_nic *efx); +int efx_reconfigure_port(struct efx_nic *efx); +int __efx_reconfigure_port(struct efx_nic *efx); /* Ethtool support */ extern const struct ethtool_ops efx_ethtool_ops; /* Reset handling */ -extern int efx_reset(struct efx_nic *efx, enum reset_type method); -extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); -extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); -extern int efx_try_recovery(struct efx_nic *efx); +int efx_reset(struct efx_nic *efx, enum reset_type method); +void efx_reset_down(struct efx_nic *efx, enum reset_type method); +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_try_recovery(struct efx_nic *efx); /* Global */ -extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); -extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, - unsigned int rx_usecs, bool rx_adaptive, - bool rx_may_override_tx); -extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, - unsigned int *rx_usecs, bool *rx_adaptive); +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); /* Dummy PHY ops for PHY drivers */ -extern int efx_port_dummy_op_int(struct efx_nic *efx); -extern void efx_port_dummy_op_void(struct efx_nic *efx); - +int efx_port_dummy_op_int(struct efx_nic *efx); +void efx_port_dummy_op_void(struct efx_nic *efx); /* MTD */ #ifdef CONFIG_SFC_MTD -extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, - size_t n_parts, size_t sizeof_part); +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); static inline int efx_mtd_probe(struct efx_nic *efx) { return efx->type->mtd_probe(efx); } -extern void efx_mtd_rename(struct efx_nic *efx); -extern void efx_mtd_remove(struct efx_nic *efx); +void efx_mtd_rename(struct efx_nic *efx); +void efx_mtd_remove(struct efx_nic 
*efx); #else static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } static inline void efx_mtd_rename(struct efx_nic *efx) {} @@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) efx_schedule_channel(channel); } -extern void efx_link_status_changed(struct efx_nic *efx); -extern void efx_link_set_advertising(struct efx_nic *efx, u32); -extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); +void efx_link_status_changed(struct efx_nic *efx); +void efx_link_set_advertising(struct efx_nic *efx, u32); +void efx_link_set_wanted_fc(struct efx_nic *efx, u8); static inline void efx_device_detach_sync(struct efx_nic *efx) { diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 5b471cf5c32..1f529fa2edb 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), EFX_ETHTOOL_UINT_TXQ_STAT(pushes), + EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets), EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), @@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, return 0; } -int efx_ethtool_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info) +static int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 96ce507d860..4d3f119b67b 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -66,6 +66,11 @@ #define EFX_USE_QWORD_IO 1 #endif +/* PIO is a win only if write-combining is possible */ +#ifdef ARCH_HAS_IOREMAP_WC +#define EFX_USE_PIO 1 +#endif + #ifdef EFX_USE_QWORD_IO static inline void _efx_writeq(struct efx_nic *efx, __le64 value, unsigned int reg) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index c082562dbf4..4b0bd8a1514 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -50,6 +50,7 @@ struct efx_mcdi_async_param { static void efx_mcdi_timeout_async(unsigned long context); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { @@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } } +static bool efx_mcdi_poll_once(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + rmb(); + if (!efx->type->mcdi_poll_response(efx)) + return false; + + spin_lock_bh(&mcdi->iface_lock); + efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); + + return true; +} + static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); @@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = jiffies; - rmb(); - if (efx->type->mcdi_poll_response(efx)) + if (efx_mcdi_poll_once(efx)) break; if (time_after(time, finish)) return -ETIMEDOUT; } - spin_lock_bh(&mcdi->iface_lock); - efx_mcdi_read_response_header(efx); - spin_unlock_bh(&mcdi->iface_lock); - /* Return rc=0 like wait_event_timeout() */ return 0; } @@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, 
size_t inlen, rc = efx_mcdi_await_completion(efx); if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + /* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request we've just cancelled, by ensuring * that the seqno check therein fails. @@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, ++mcdi->seqno; ++mcdi->credits; spin_unlock_bh(&mcdi->iface_lock); + } - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d mode %d timed out\n", - cmd, (int)inlen, mcdi->mode); - } else { + if (rc == 0) { size_t hdr_len, data_len; /* At the very least we need a memory barrier here to ensure @@ -963,7 +982,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached) { MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); size_t outlen; int rc; @@ -981,6 +1000,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, goto fail; } + /* We currently assume we have control of the external link + * and are completely trusted by firmware. Abort probing + * if that's not true for this function. + */ + if (driver_operating && + outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN && + (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { + netif_err(efx, probe, efx->net_dev, + "This driver version only supports one function per port\n"); + return -ENODEV; + } + if (was_attached != NULL) *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); return 0; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index c34d0d4e10e..15816cacb54 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -75,6 +75,8 @@ struct efx_mcdi_mon { unsigned long last_update; struct device *device; struct efx_mcdi_mon_attribute *attrs; + struct attribute_group group; + const struct attribute_group *groups[2]; unsigned int n_attrs; }; @@ -108,38 +110,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) } #endif -extern int efx_mcdi_init(struct efx_nic *efx); -extern void efx_mcdi_fini(struct efx_nic *efx); +int efx_mcdi_init(struct efx_nic *efx); +void efx_mcdi_fini(struct efx_nic *efx); -extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen, +int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); + +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen); +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); -extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen); -extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual); - typedef void efx_mcdi_async_completer(struct 
efx_nic *efx, unsigned long cookie, int rc, efx_dword_t *outbuf, size_t outlen_actual); -extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, - size_t outlen, - efx_mcdi_async_completer *complete, - unsigned long cookie); +int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); -extern int efx_mcdi_poll_reboot(struct efx_nic *efx); -extern void efx_mcdi_mode_poll(struct efx_nic *efx); -extern void efx_mcdi_mode_event(struct efx_nic *efx); -extern void efx_mcdi_flush_async(struct efx_nic *efx); +int efx_mcdi_poll_reboot(struct efx_nic *efx); +void efx_mcdi_mode_poll(struct efx_nic *efx); +void efx_mcdi_mode_event(struct efx_nic *efx); +void efx_mcdi_flush_async(struct efx_nic *efx); -extern void efx_mcdi_process_event(struct efx_channel *channel, - efx_qword_t *event); -extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); /* We expect that 16- and 32-bit fields in MCDI requests and responses * are appropriately aligned, but 64-bit fields are only @@ -275,55 +274,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define MCDI_EVENT_FIELD(_ev, _field) \ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) -extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); -extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, - u16 *fw_subtype_list, u32 *capabilities); -extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, - u32 dest_evq); -extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); -extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, - size_t *size_out, size_t *erase_size_out, - bool *protected_out); -extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); -extern int efx_mcdi_handle_assertion(struct efx_nic *efx); -extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); -extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, - const u8 *mac, int *id_out); -extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); -extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); -extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); -extern int efx_mcdi_flush_rxqs(struct efx_nic *efx); -extern int efx_mcdi_port_probe(struct efx_nic *efx); -extern void efx_mcdi_port_remove(struct efx_nic *efx); -extern int efx_mcdi_port_reconfigure(struct efx_nic *efx); -extern int efx_mcdi_port_get_number(struct efx_nic *efx); -extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); -extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); -extern int efx_mcdi_set_mac(struct efx_nic *efx); +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out); +int efx_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_mcdi_handle_assertion(struct efx_nic *efx); +void 
efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); +int efx_mcdi_wol_filter_reset(struct efx_nic *efx); +int efx_mcdi_flush_rxqs(struct efx_nic *efx); +int efx_mcdi_port_probe(struct efx_nic *efx); +void efx_mcdi_port_remove(struct efx_nic *efx); +int efx_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_mcdi_port_get_number(struct efx_nic *efx); +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +int efx_mcdi_set_mac(struct efx_nic *efx); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) -extern void efx_mcdi_mac_start_stats(struct efx_nic *efx); -extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx); -extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx); -extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); -extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); +void efx_mcdi_mac_start_stats(struct efx_nic *efx); +void efx_mcdi_mac_stop_stats(struct efx_nic *efx); +bool efx_mcdi_mac_check_fault(struct efx_nic *efx); +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); #ifdef CONFIG_SFC_MCDI_MON -extern int efx_mcdi_mon_probe(struct efx_nic *efx); -extern void efx_mcdi_mon_remove(struct efx_nic *efx); +int efx_mcdi_mon_probe(struct efx_nic *efx); +void efx_mcdi_mon_remove(struct efx_nic *efx); #else static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} #endif #ifdef CONFIG_SFC_MTD -extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, u8 *buffer); -extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); -extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, const u8 *buffer); -extern int efx_mcdi_mtd_sync(struct mtd_info *mtd); -extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); +int efx_mcdi_mtd_sync(struct mtd_info *mtd); +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); #endif #endif /* EFX_MCDI_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 4cc5d95b2a5..d72ad4fc361 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) return rc; } -static ssize_t efx_mcdi_mon_show_name(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", KBUILD_MODNAME); -} - static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, efx_dword_t *entry) { - struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_nic *efx = dev_get_drvdata(dev->parent); struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); int rc; @@ -263,7 
+256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev, efx_mcdi_sensor_type[mon_attr->type].label); } -static int +static void efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, ssize_t (*reader)(struct device *, struct device_attribute *, char *), @@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; - int rc; strlcpy(attr->name, name, sizeof(attr->name)); attr->index = index; @@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, attr->dev_attr.attr.name = attr->name; attr->dev_attr.attr.mode = S_IRUGO; attr->dev_attr.show = reader; - rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); - if (rc == 0) - ++hwmon->n_attrs; - return rc; + hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; } int efx_mcdi_mon_probe(struct efx_nic *efx) @@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) efx_mcdi_mon_update(efx); /* Allocate space for the maximum possible number of - * attributes for this set of sensors: name of the driver plus + * attributes for this set of sensors: * value, min, max, crit, alarm and label for each sensor. */ - n_attrs = 1 + 6 * n_sensors; + n_attrs = 6 * n_sensors; hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); if (!hwmon->attrs) { rc = -ENOMEM; goto fail; } - - hwmon->device = hwmon_device_register(&efx->pci_dev->dev); - if (IS_ERR(hwmon->device)) { - rc = PTR_ERR(hwmon->device); + hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!hwmon->group.attrs) { + rc = -ENOMEM; goto fail; } - rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0); - if (rc) - goto fail; - for (i = 0, j = -1, type = -1; ; i++) { enum efx_hwmon_type hwmon_type; const char *hwmon_prefix; @@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) page = type / 32; j = -1; if (page == n_pages) - return 0; + goto hwmon_register; MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); @@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) if (min1 != max1) { snprintf(name, sizeof(name), "%s%u_input", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_value, i, type, 0); - if (rc) - goto fail; if (hwmon_type != EFX_HWMON_POWER) { snprintf(name, sizeof(name), "%s%u_min", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, min1); - if (rc) - goto fail; } snprintf(name, sizeof(name), "%s%u_max", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, max1); - if (rc) - goto fail; if (min2 != max2) { /* Assume max2 is critical value. 
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) */ snprintf(name, sizeof(name), "%s%u_crit", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, max2); - if (rc) - goto fail; } } snprintf(name, sizeof(name), "%s%u_alarm", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_alarm, i, type, 0); - if (rc) - goto fail; if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && efx_mcdi_sensor_type[type].label) { snprintf(name, sizeof(name), "%s%u_label", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_label, i, type, 0); - if (rc) - goto fail; } } +hwmon_register: + hwmon->groups[0] = &hwmon->group; + hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, + KBUILD_MODNAME, NULL, + hwmon->groups); + if (IS_ERR(hwmon->device)) { + rc = PTR_ERR(hwmon->device); + goto fail; + } + + return 0; + fail: efx_mcdi_mon_remove(efx); return rc; @@ -516,14 +501,11 @@ fail: void efx_mcdi_mon_remove(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - unsigned int i; - for (i = 0; i < hwmon->n_attrs; i++) - device_remove_file(&efx->pci_dev->dev, - &hwmon->attrs[i].dev_attr); - kfree(hwmon->attrs); if (hwmon->device) hwmon_device_unregister(hwmon->device); + kfree(hwmon->attrs); + kfree(hwmon->group.attrs); efx_nic_free_buffer(efx, &hwmon->dma_buf); } diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index b5cf62492f8..e0a63ddb7a6 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2574,8 +2574,58 @@ #define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ #define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ #define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ -#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ -#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. 
+ */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an emergency descriptor fetch was + * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_END 0x5f #define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ #define MC_CMD_MAC_NSTATS 0x61 /* enum */ @@ -5065,6 +5115,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 /* RxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h index 16824fecc5e..4a2dc4c281b 100644 --- a/drivers/net/ethernet/sfc/mdio_10g.h +++ b/drivers/net/ethernet/sfc/mdio_10g.h @@ -20,7 +20,7 @@ static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } -extern unsigned efx_mdio_id_oui(u32 id); +unsigned efx_mdio_id_oui(u32 id); static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr) { @@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx) return sync; } -extern const char *efx_mdio_mmd_name(int mmd); +const char *efx_mdio_mmd_name(int mmd); /* * Reset a specific MMD and wait for reset to clear. 
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd); * * This function will sleep */ -extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, - int spins, int spintime); +int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime); /* As efx_mdio_check_mmd but for multiple MMDs */ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask); /* Check the link status of specified mmds in bit mask */ -extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); +bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); /* Generic transmit disable support through PMAPMD */ -extern void efx_mdio_transmit_disable(struct efx_nic *efx); +void efx_mdio_transmit_disable(struct efx_nic *efx); /* Generic part of reconfigure: set/clear loopback bits */ -extern void efx_mdio_phy_reconfigure(struct efx_nic *efx); +void efx_mdio_phy_reconfigure(struct efx_nic *efx); /* Set the power state of the specified MMDs */ -extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx, - int low_power, unsigned int mmd_mask); +void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power, + unsigned int mmd_mask); /* Set (some of) the PHY settings over MDIO */ -extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); +int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); /* Push advertising flags and restart autonegotiation */ -extern void efx_mdio_an_reconfigure(struct efx_nic *efx); +void efx_mdio_an_reconfigure(struct efx_nic *efx); /* Get pause parameters from AN if available (otherwise return * requested pause parameters) @@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx); u8 efx_mdio_get_pause(struct efx_nic *efx); /* Wait for specified MMDs to exit reset within a timeout */ -extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx, - unsigned int mmd_mask); +int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask); /* Set or clear flag, debouncing */ static inline void @@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr, } /* Liveness self-test for MDIO PHYs */ -extern int efx_mdio_test_alive(struct efx_nic *efx); +int efx_mdio_test_alive(struct efx_nic *efx); #endif /* EFX_MDIO_10G_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b172ed13305..542a0d252ae 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -141,6 +141,8 @@ struct efx_special_buffer { * @len: Length of this fragment. * This field is zero when the queue slot is empty. * @unmap_len: Length of this fragment to unmap + * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping. * Only valid if @unmap_len != 0. */ struct efx_tx_buffer { union { @@ -154,6 +156,7 @@ struct efx_tx_buffer { unsigned short flags; unsigned short len; unsigned short unmap_len; + unsigned short dma_offset; }; #define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */ #define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ @@ -182,6 +185,9 @@ struct efx_tx_buffer { * @tsoh_page: Array of pages of TSO header buffers * @txd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. + * @piobuf: PIO buffer region for this TX queue (shared with its partner). * Size of the region is efx_piobuf_size. + * @piobuf_offset: Buffer offset to be specified in PIO descriptors * @initialised: Has hardware queue been initialised? * @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings. @@ -209,6 +215,7 @@ struct efx_tx_buffer { * blocks * @tso_packets: Number of packets via the TSO xmit path * @pushes: Number of times the TX push feature has been used + * @pio_packets: Number of times the TX PIO feature has been used * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. @@ -223,6 +230,8 @@ struct efx_tx_queue { struct efx_buffer *tsoh_page; struct efx_special_buffer txd; unsigned int ptr_mask; + void __iomem *piobuf; + unsigned int piobuf_offset; bool initialised; /* Members used mainly on the completion path */ @@ -238,6 +247,7 @@ struct efx_tx_queue { unsigned int tso_long_headers; unsigned int tso_packets; unsigned int pushes; + unsigned int pio_packets; /* Members shared between paths and sometimes updated */ unsigned int empty_read_count ____cacheline_aligned_in_smp; @@ -673,6 +683,8 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX + * @rx_ip_align: RX DMA address offset to have IP header aligned in + * accordance with NET_IP_ALIGN * @rx_dma_len: Current maximum RX DMA length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_truesize: Amortised allocation size of an RX buffer, @@ -806,6 +818,7 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; + unsigned int rx_ip_align; unsigned int rx_dma_len; unsigned int rx_buffer_order; unsigned int rx_buffer_truesize; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index e7dbd2dd202..9c90bf56090 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -19,6 +19,7 @@ #include "bitfield.h" #include "efx.h" #include "nic.h" +#include "ef10_regs.h" #include "farch_regs.h" #include "io.h" #include "workarounds.h" @@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) /* Register dump */ -#define REGISTER_REVISION_A 1 -#define REGISTER_REVISION_B 2 -#define REGISTER_REVISION_C 3 -#define REGISTER_REVISION_Z 3 /* latest revision */ +#define REGISTER_REVISION_FA 1 +#define REGISTER_REVISION_FB 2 +#define REGISTER_REVISION_FC 3 +#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */ +#define REGISTER_REVISION_ED 4 +#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ struct efx_nic_reg { u32 offset:24; - u32 min_revision:2, max_revision:2; + u32 min_revision:3, max_revision:3; }; -#define REGISTER(name, min_rev, max_rev) { \ - FR_ ## min_rev ## max_rev ## _ ## name, \ - REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ +#define REGISTER(name, arch, min_rev, max_rev) { \ + arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev \ } -#define REGISTER_AA(name) REGISTER(name, A, A) -#define REGISTER_AB(name) REGISTER(name, A, B) -#define REGISTER_AZ(name) REGISTER(name, A, Z) -#define REGISTER_BB(name) REGISTER(name, B, B) -#define REGISTER_BZ(name) REGISTER(name, B, Z) -#define REGISTER_CZ(name) REGISTER(name, C, Z) +#define REGISTER_AA(name) REGISTER(name, F, A, A) +#define REGISTER_AB(name) REGISTER(name, F, A, B) +#define REGISTER_AZ(name) REGISTER(name, F, A, Z) +#define REGISTER_BB(name) REGISTER(name, F, B, B) +#define
REGISTER_BZ(name) REGISTER(name, F, B, Z) +#define REGISTER_CZ(name) REGISTER(name, F, C, Z) +#define REGISTER_DZ(name) REGISTER(name, E, D, Z) static const struct efx_nic_reg efx_nic_regs[] = { REGISTER_AZ(ADR_REGION), @@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = { REGISTER_AB(XX_TXDRV_CTL), /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ /* XX_CORE_STAT is partly RC */ + REGISTER_DZ(BIU_HW_REV_ID), + REGISTER_DZ(MC_DB_LWRD), + REGISTER_DZ(MC_DB_HWRD), }; struct efx_nic_reg_table { u32 offset:24; - u32 min_revision:2, max_revision:2; + u32 min_revision:3, max_revision:3; u32 step:6, rows:21; }; -#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ +#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ offset, \ - REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev, \ step, rows \ } -#define REGISTER_TABLE(name, min_rev, max_rev) \ +#define REGISTER_TABLE(name, arch, min_rev, max_rev) \ REGISTER_TABLE_DIMENSIONS( \ - name, FR_ ## min_rev ## max_rev ## _ ## name, \ - min_rev, max_rev, \ - FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ - FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) -#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) -#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) -#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) -#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) + name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + arch, min_rev, max_rev, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A) +#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z) +#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B) +#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z) #define REGISTER_TABLE_BB_CZ(name) \ - REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \ FR_BZ_ ## name ## _STEP, \ FR_BB_ ## name ## _ROWS), \ - REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \ FR_BZ_ ## name ## _STEP, \ FR_CZ_ ## name ## _ROWS) -#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) +#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) +#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) static const struct efx_nic_reg_table efx_nic_reg_tables[] = { /* DRIVER is not used */ @@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = { * 1K entries allows for some expansion of queue count and * size before we need to change the version. 
*/ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, - A, A, 8, 1024), + F, A, A, 8, 1024), REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, - B, Z, 8, 1024), + F, B, Z, 8, 1024), REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), REGISTER_TABLE_BB_CZ(TIMER_TBL), REGISTER_TABLE_BB_CZ(TX_PACE_TBL), @@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = { /* MSIX_PBA_TABLE is not mapped */ /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */ REGISTER_TABLE_BZ(RX_FILTER_TBL0), + REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), }; size_t efx_nic_get_regs_len(struct efx_nic *efx) @@ -469,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, * @count: Length of the @desc array * @mask: Bitmask of which elements of @desc are enabled * @stats: Buffer to update with the converted statistics. The length - * of this array must be at least the number of set bits in the - * first @count bits of @mask. + * of this array must be at least @count. * @dma_buf: DMA buffer containing hardware statistics * @accumulate: If set, the converted values will be added rather than * directly stored to the corresponding elements of @stats @@ -503,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, } if (accumulate) - *stats += val; + stats[index] += val; else - *stats = val; + stats[index] = val; } - - ++stats; } } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index fda29d39032..91c63ec79c5 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx) return efx->type->revision; } -extern u32 efx_farch_fpga_ver(struct efx_nic *efx); +u32 efx_farch_fpga_ver(struct efx_nic *efx); /* NIC has two interlinked PCI functions for the same port. */ static inline bool efx_nic_is_dual_func(struct efx_nic *efx) @@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; } +/* Report whether the NIC considers this TX queue empty, given the + * write_count used for the last doorbell push. May return false + * negative. + */ +static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count); +} + /* Decide whether to push a TX descriptor to the NIC vs merely writing * the doorbell. This can reduce latency when we are adding a single * descriptor to an empty queue, but is otherwise pointless.
Further, @@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) { - unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); - - if (empty_read_count == 0) - return false; + bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count); tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 - && tx_queue->write_count - write_count == 1; + return was_empty && tx_queue->write_count - write_count == 1; } /* Returns a pointer to the specified descriptor in the RX descriptor queue */ @@ -386,9 +402,27 @@ enum { EF10_STAT_rx_align_error, EF10_STAT_rx_length_error, EF10_STAT_rx_nodesc_drops, + EF10_STAT_rx_pm_trunc_bb_overflow, + EF10_STAT_rx_pm_discard_bb_overflow, + EF10_STAT_rx_pm_trunc_vfifo_full, + EF10_STAT_rx_pm_discard_vfifo_full, + EF10_STAT_rx_pm_trunc_qbb, + EF10_STAT_rx_pm_discard_qbb, + EF10_STAT_rx_pm_discard_mapping, + EF10_STAT_rx_dp_q_disabled_packets, + EF10_STAT_rx_dp_di_dropped_packets, + EF10_STAT_rx_dp_streaming_packets, + EF10_STAT_rx_dp_emerg_fetch, + EF10_STAT_rx_dp_emerg_wait, EF10_STAT_COUNT }; +/* Maximum number of TX PIO buffers we may allocate to a function. + * This matches the total number of buffers on each SFC9100-family + * controller. + */ +#define EF10_TX_PIOBUF_COUNT 16 + /** * struct efx_ef10_nic_data - EF10 architecture NIC state * @mcdi_buf: DMA buffer for MCDI @@ -397,6 +431,13 @@ enum { * @n_allocated_vis: Number of VIs allocated to this function * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot * @must_restore_filters: Flag: filters have yet to be restored after MC reboot + * @n_piobufs: Number of PIO buffers allocated to this function + * @wc_membase: Base address of write-combining mapping of the memory BAR + * @pio_write_base: Base address for writing PIO buffers + * @pio_write_vi_base: Relative VI number for @pio_write_base + * @piobuf_handle: Handle of each PIO buffer allocated + * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC + * reboot * @rx_rss_context: Firmware handle for our RSS context * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 @@ -412,6 +453,11 @@ struct efx_ef10_nic_data { unsigned int n_allocated_vis; bool must_realloc_vis; bool must_restore_filters; + unsigned int n_piobufs; + void __iomem *wc_membase, *pio_write_base; + unsigned int pio_write_vi_base; + unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + bool must_restore_piobufs; u32 rx_rss_context; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; @@ -463,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx) return 1 << efx->vi_scale; } -extern int efx_init_sriov(void); -extern void efx_sriov_probe(struct efx_nic *efx); -extern int efx_sriov_init(struct efx_nic *efx); -extern void efx_sriov_mac_address_changed(struct efx_nic *efx); -extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); -extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); -extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); -extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); -extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr); -extern void efx_sriov_reset(struct efx_nic *efx); -extern void efx_sriov_fini(struct efx_nic *efx); -extern void efx_fini_sriov(void); +int efx_init_sriov(void); +void 
efx_sriov_probe(struct efx_nic *efx); +int efx_sriov_init(struct efx_nic *efx); +void efx_sriov_mac_address_changed(struct efx_nic *efx); +void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); +void efx_sriov_flr(struct efx_nic *efx, unsigned flr); +void efx_sriov_reset(struct efx_nic *efx); +void efx_sriov_fini(struct efx_nic *efx); +void efx_fini_sriov(void); #else @@ -500,22 +546,22 @@ static inline void efx_fini_sriov(void) {} #endif -extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); -extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, - u16 vlan, u8 qos); -extern int efx_sriov_get_vf_config(struct net_device *dev, int vf, - struct ifla_vf_info *ivf); -extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, - bool spoofchk); +int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); +int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos); +int efx_sriov_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivf); +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, + bool spoofchk); struct ethtool_ts_info; -extern void efx_ptp_probe(struct efx_nic *efx); -extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); -extern void efx_ptp_get_ts_info(struct efx_nic *efx, - struct ethtool_ts_info *ts_info); -extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_ptp_probe(struct efx_nic *efx); +int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; @@ -529,7 +575,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type; ************************************************************************** */ -extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); +int falcon_probe_board(struct efx_nic *efx, u16 revision_info); /* TX data path */ static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) @@ -597,58 +643,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) { channel->efx->type->ev_read_ack(channel); } -extern void efx_nic_event_test_start(struct efx_channel *channel); +void efx_nic_event_test_start(struct efx_channel *channel); /* Falcon/Siena queue operations */ -extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); -extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue); -extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); -extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); -extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue); -extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); -extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue); -extern void efx_farch_rx_fini(struct efx_rx_queue 
*rx_queue); -extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); -extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue); -extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); -extern int efx_farch_ev_probe(struct efx_channel *channel); -extern int efx_farch_ev_init(struct efx_channel *channel); -extern void efx_farch_ev_fini(struct efx_channel *channel); -extern void efx_farch_ev_remove(struct efx_channel *channel); -extern int efx_farch_ev_process(struct efx_channel *channel, int quota); -extern void efx_farch_ev_read_ack(struct efx_channel *channel); -extern void efx_farch_ev_test_generate(struct efx_channel *channel); +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); +void efx_farch_tx_init(struct efx_tx_queue *tx_queue); +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); +void efx_farch_tx_write(struct efx_tx_queue *tx_queue); +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); +void efx_farch_rx_init(struct efx_rx_queue *rx_queue); +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); +void efx_farch_rx_write(struct efx_rx_queue *rx_queue); +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); +int efx_farch_ev_probe(struct efx_channel *channel); +int efx_farch_ev_init(struct efx_channel *channel); +void efx_farch_ev_fini(struct efx_channel *channel); +void efx_farch_ev_remove(struct efx_channel *channel); +int efx_farch_ev_process(struct efx_channel *channel, int quota); +void efx_farch_ev_read_ack(struct efx_channel *channel); +void efx_farch_ev_test_generate(struct efx_channel *channel); /* Falcon/Siena filter operations */ -extern int efx_farch_filter_table_probe(struct efx_nic *efx); -extern void efx_farch_filter_table_restore(struct efx_nic *efx); -extern void efx_farch_filter_table_remove(struct efx_nic *efx); -extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); -extern s32 efx_farch_filter_insert(struct efx_nic *efx, - struct efx_filter_spec *spec, bool replace); -extern int efx_farch_filter_remove_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id); -extern int efx_farch_filter_get_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *); -extern void efx_farch_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority); -extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority); -extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); -extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size); +int efx_farch_filter_table_probe(struct efx_nic *efx); +void efx_farch_filter_table_restore(struct efx_nic *efx); +void efx_farch_filter_table_remove(struct efx_nic *efx); +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); +s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, + bool replace); +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, u32 filter_id, + struct efx_filter_spec *); +void efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 
efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, u32 *buf, + u32 size); #ifdef CONFIG_RFS_ACCEL -extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, - struct efx_filter_spec *spec); -extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, - unsigned int index); +s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *spec); +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index); #endif -extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); -extern bool efx_nic_event_present(struct efx_channel *channel); +bool efx_nic_event_present(struct efx_channel *channel); /* Some statistics are computed as A - B where A and B each increase * linearly with some hardware counter(s) and the counters are read @@ -669,17 +715,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) } /* Interrupts */ -extern int efx_nic_init_interrupt(struct efx_nic *efx); -extern void efx_nic_irq_test_start(struct efx_nic *efx); -extern void efx_nic_fini_interrupt(struct efx_nic *efx); +int efx_nic_init_interrupt(struct efx_nic *efx); +void efx_nic_irq_test_start(struct efx_nic *efx); +void efx_nic_fini_interrupt(struct efx_nic *efx); /* Falcon/Siena interrupts */ -extern void efx_farch_irq_enable_master(struct efx_nic *efx); -extern void efx_farch_irq_test_generate(struct efx_nic *efx); -extern void efx_farch_irq_disable_master(struct efx_nic *efx); -extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); -extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); -extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); +void efx_farch_irq_enable_master(struct efx_nic *efx); +void efx_farch_irq_test_generate(struct efx_nic *efx); +void efx_farch_irq_disable_master(struct efx_nic *efx); +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { @@ -691,21 +737,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) } /* Global Resources */ -extern int efx_nic_flush_queues(struct efx_nic *efx); -extern void siena_prepare_flush(struct efx_nic *efx); -extern int efx_farch_fini_dmaq(struct efx_nic *efx); -extern void siena_finish_flush(struct efx_nic *efx); -extern void falcon_start_nic_stats(struct efx_nic *efx); -extern void falcon_stop_nic_stats(struct efx_nic *efx); -extern int falcon_reset_xaui(struct efx_nic *efx); -extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); -extern void efx_farch_init_common(struct efx_nic *efx); -extern void efx_ef10_handle_drain_event(struct efx_nic *efx); +int efx_nic_flush_queues(struct efx_nic *efx); +void siena_prepare_flush(struct efx_nic *efx); +int efx_farch_fini_dmaq(struct efx_nic *efx); +void siena_finish_flush(struct efx_nic *efx); +void falcon_start_nic_stats(struct efx_nic *efx); +void falcon_stop_nic_stats(struct efx_nic *efx); +int falcon_reset_xaui(struct efx_nic *efx); +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); +void efx_farch_init_common(struct efx_nic *efx); +void efx_ef10_handle_drain_event(struct efx_nic *efx); static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) { 
efx->type->rx_push_indir_table(efx); } -extern void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_push_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags); @@ -716,24 +762,22 @@ struct efx_farch_register_test { unsigned address; efx_oword_t mask; }; -extern int efx_farch_test_registers(struct efx_nic *efx, - const struct efx_farch_register_test *regs, - size_t n_regs); +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs); -extern size_t efx_nic_get_regs_len(struct efx_nic *efx); -extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); +size_t efx_nic_get_regs_len(struct efx_nic *efx); +void efx_nic_get_regs(struct efx_nic *efx, void *buf); -extern size_t -efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); -extern void -efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, - u64 *stats, const void *dma_buf, bool accumulate); +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *dma_buf, bool accumulate); #define EFX_MAX_FLUSH_TIME 5000 -extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, - efx_qword_t *event); +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event); #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h index 45eeb707515..803bf445c08 100644 --- a/drivers/net/ethernet/sfc/phy.h +++ b/drivers/net/ethernet/sfc/phy.h @@ -15,7 +15,7 @@ */ extern const struct efx_phy_operations falcon_sfx7101_phy_ops; -extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); /**************************************************************************** * AMCC/Quake QT202x PHYs @@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops; #define QUAKE_LED_TXLINK (0) #define QUAKE_LED_RXLINK (8) -extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); +void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); /**************************************************************************** * Transwitch CX4 retimer @@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops; #define TXC_GPIO_DIR_INPUT 0 #define TXC_GPIO_DIR_OUTPUT 1 -extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); -extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); +void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); +void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); #endif diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 03acf57df04..3dd39dcfe36 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -220,6 +220,7 @@ struct efx_ptp_timeset { * @evt_list: List of MC receive events awaiting packets * @evt_free_list: List of free events * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @evt_overflow: Boolean indicating that event list has overflowed * @rx_evts: Instantiated events (on evt_list and evt_free_list) * @workwq: Work queue 
for processing pending PTP operations * @work: Work task @@ -270,6 +271,7 @@ struct efx_ptp_data { struct list_head evt_list; struct list_head evt_free_list; spinlock_t evt_lock; + bool evt_overflow; struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; struct workqueue_struct *workwq; struct work_struct work; @@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) } } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. + */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); } @@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, break; } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. + */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); return rc; @@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) __skb_queue_tail(q, skb); } else if (time_after(jiffies, match->expiry)) { match->state = PTP_PACKET_STATE_TIMED_OUT; - netif_warn(efx, rx_err, efx->net_dev, - "PTP packet - no timestamp seen\n"); + if (net_ratelimit()) + netif_warn(efx, rx_err, efx->net_dev, + "PTP packet - no timestamp seen\n"); __skb_queue_tail(q, skb); } else { /* Replace unprocessed entry and stop */ @@ -788,9 +801,14 @@ fail: static int efx_ptp_stop(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - int rc = efx_ptp_disable(efx); struct list_head *cursor; struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); if (ptp->rxfilter_installed) { efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, @@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx) list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { list_move(cursor, &efx->ptp_data->evt_free_list); } + ptp->evt_overflow = false; spin_unlock_bh(&efx->ptp_data->evt_lock); return rc; } +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + static void efx_ptp_pps_worker(struct work_struct *work) { struct efx_ptp_data *ptp = @@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) spin_lock_init(&ptp->evt_lock); for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + ptp->evt_overflow = false; ptp->phc_clock_info.owner = THIS_MODULE; snprintf(ptp->phc_clock_info.name, @@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) skb->len >= PTP_MIN_LENGTH && skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); } @@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, { if ((enable_wanted != efx->ptp_data->enabled) || (enable_wanted && (efx->ptp_data->mode != new_mode))) { - int rc; + int rc = 0; if (enable_wanted) { /* Change of mode requires disable */ @@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, * succeed. 
*/ efx->ptp_data->mode = new_mode; - rc = efx_ptp_start(efx); + if (netif_running(efx->net_dev)) + rc = efx_ptp_start(efx); if (rc == 0) { rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS * 2); @@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) list_add_tail(&evt->link, &ptp->evt_list); queue_work(ptp->workwq, &ptp->work); - } else { - netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); + } else if (!ptp->evt_overflow) { + /* Log a warning message and set the event overflow flag. + * The message won't be logged again until the event queue + * becomes empty. + */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + ptp->evt_overflow = true; } spin_unlock_bh(&ptp->evt_lock); } @@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) if (rc != 0) return rc; - ptp_data->current_adjfreq = delta; + ptp_data->current_adjfreq = adjustment_ns; return 0; } @@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), @@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx) efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = &efx_ptp_channel_type; } + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + efx_ptp_stop(efx); +} diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 4a596725023..42488df1f4e 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -12,6 +12,7 @@ #include <linux/in.h> #include <linux/slab.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/prefetch.h> @@ -93,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / @@ -188,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; + rx_buf->dma_addr = dma_addr + efx->rx_ip_align; rx_buf->page = page; - rx_buf->page_offset = page_offset + NET_IP_ALIGN; + rx_buf->page_offset = page_offset + efx->rx_ip_align; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; @@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_filter_spec spec; - const struct iphdr *ip; const __be16 *ports; + __be16 ether_type; int nhoff; int rc; - nhoff = skb_network_offset(skb); + /* The core RPS/RFS code has already parsed and validated + * VLAN, IP and transport headers. We assume they are in the + * header area. 
+ */ if (skb->protocol == htons(ETH_P_8021Q)) { - EFX_BUG_ON_PARANOID(skb_headlen(skb) < - nhoff + sizeof(struct vlan_hdr)); - if (((const struct vlan_hdr *)skb->data + nhoff)-> - h_vlan_encapsulated_proto != htons(ETH_P_IP)) - return -EPROTONOSUPPORT; + const struct vlan_hdr *vh = + (const struct vlan_hdr *)skb->data; - /* This is IP over 802.1q VLAN. We can't filter on the - * IP 5-tuple and the vlan together, so just strip the - * vlan header and filter on the IP part. + /* We can't filter on the IP 5-tuple and the vlan + * together, so just strip the vlan header and filter + * on the IP part. */ - nhoff += sizeof(struct vlan_hdr); - } else if (skb->protocol != htons(ETH_P_IP)) { - return -EPROTONOSUPPORT; + EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); + ether_type = vh->h_vlan_encapsulated_proto; + nhoff = sizeof(struct vlan_hdr); + } else { + ether_type = skb->protocol; + nhoff = 0; } - /* RFS must validate the IP header length before calling us */ - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); - ip = (const struct iphdr *)(skb->data + nhoff); - if (ip_is_fragment(ip)) + if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); - ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, rxq_index); - rc = efx_filter_set_ipv4_full(&spec, ip->protocol, - ip->daddr, ports[1], ip->saddr, ports[0]); - if (rc) - return rc; + spec.match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec.ether_type = ether_type; + + if (ether_type == htons(ETH_P_IP)) { + const struct iphdr *ip = + (const struct iphdr *)(skb->data + nhoff); + + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + spec.ip_proto = ip->protocol; + spec.rem_host[0] = ip->saddr; + spec.loc_host[0] = ip->daddr; + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + } else { + const struct ipv6hdr *ip6 = + (const struct ipv6hdr *)(skb->data + nhoff); + + EFX_BUG_ON_PARANOID(skb_headlen(skb) < + nhoff + sizeof(*ip6) + 4); + spec.ip_proto = ip6->nexthdr; + memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); + memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); + ports = (const __be16 *)(ip6 + 1); + } + + spec.rem_port = ports[0]; + spec.loc_port = ports[1]; rc = efx->type->filter_rfs_insert(efx, &spec); if (rc < 0) @@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, channel = efx_get_channel(efx, skb_get_rx_queue(skb)); ++channel->rfs_filters_added; - netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", - (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP", - &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), - rxq_index, flow_id, rc); + if (ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + (spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", + spec.rem_host, ntohs(ports[0]), spec.loc_host, + ntohs(ports[1]), rxq_index, flow_id, rc); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + spec.rem_host, ntohs(ports[0]), spec.loc_host, + ntohs(ports[1]), rxq_index, flow_id, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h index 87698ae0bf7..a2f4a06ffa4 100644 --- a/drivers/net/ethernet/sfc/selftest.h +++ b/drivers/net/ethernet/sfc/selftest.h @@ -43,13 +43,12 @@ struct efx_self_tests { struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; }; -extern void efx_loopback_rx_packet(struct efx_nic *efx, - const char *buf_ptr, int pkt_len); -extern int efx_selftest(struct efx_nic *efx, - struct efx_self_tests *tests, - unsigned flags); -extern void efx_selftest_async_start(struct efx_nic *efx); -extern void efx_selftest_async_cancel(struct efx_nic *efx); -extern void efx_selftest_async_work(struct work_struct *data); +void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, + int pkt_len); +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags); +void efx_selftest_async_start(struct efx_nic *efx); +void efx_selftest_async_cancel(struct efx_nic *efx); +void efx_selftest_async_work(struct work_struct *data); #endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 2ac91c5b5ee..c49d1fb1696 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -17,10 +17,46 @@ #include <net/ipv6.h> #include <linux/if_ether.h> #include <linux/highmem.h> +#include <linux/cache.h> #include "net_driver.h" #include "efx.h" +#include "io.h" #include "nic.h" #include "workarounds.h" +#include "ef10_regs.h" + +#ifdef EFX_USE_PIO + +#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE +#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) +unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; + +#endif /* EFX_USE_PIO */ + +static inline unsigned int +efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) +{ + return tx_queue->insert_count & tx_queue->ptr_mask; +} + +static inline struct efx_tx_buffer * +__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; +} + +static inline struct efx_tx_buffer * +efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer = + __efx_tx_queue_get_insert_buffer(tx_queue); + + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->flags); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + + return buffer; +} static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, @@ -29,8 +65,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, { if (buffer->unmap_len) { struct device *dma_dev = &tx_queue->efx->pci_dev->dev; - dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - - buffer->unmap_len); + dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, DMA_TO_DEVICE); @@ -83,8 +118,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) */ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; - /* Possibly one more per segment for the alignment workaround */ - if (EFX_WORKAROUND_5391(efx)) + /* Possibly one 
more per segment for the alignment workaround, + * or for option descriptors + */ + if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) max_descs += EFX_TSO_MAX_SEGS; /* Possibly more for PCIe page boundaries within input fragments */ @@ -145,6 +182,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) } } +#ifdef EFX_USE_PIO + +struct efx_short_copy_buffer { + int used; + u8 buf[L1_CACHE_BYTES]; +}; + +/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + int block_len = len & ~(sizeof(copy_buf->buf) - 1); + + memcpy_toio(*piobuf, data, block_len); + *piobuf += block_len; + len -= block_len; + + if (len) { + data += block_len; + BUG_ON(copy_buf->used); + BUG_ON(len > sizeof(copy_buf->buf)); + memcpy(copy_buf->buf, data, len); + copy_buf->used = len; + } +} + +/* Copy to PIO, respecting dword alignment, popping data from copy buffer first. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + if (copy_buf->used) { + /* if the copy buffer is partially full, fill it up and write */ + int copy_to_buf = + min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); + + memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); + copy_buf->used += copy_to_buf; + + /* if we didn't fill it up then we're done for now */ + if (copy_buf->used < sizeof(copy_buf->buf)) + return; + + memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); + *piobuf += sizeof(copy_buf->buf); + data += copy_to_buf; + len -= copy_to_buf; + copy_buf->used = 0; + } + + efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); +} + +static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + /* if there's anything in it, write the whole buffer, including junk */ + if (copy_buf->used) + memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); +} + +/* Traverse skb structure and copy fragments in to PIO buffer. + * Advances piobuf pointer. + */ +static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, + u8 __iomem **piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + int i; + + efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), + copy_buf); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + vaddr = kmap_atomic(skb_frag_page(f)); + + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, + skb_frag_size(f), copy_buf); + kunmap_atomic(vaddr); + } + + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list); +} + +static struct efx_tx_buffer * +efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + u8 __iomem *piobuf = tx_queue->piobuf; + + /* Copy to PIO buffer. Ensure the writes are padded to the end + * of a cache line, as this is required for write-combining to be + * effective on at least x86. + */ + + if (skb_shinfo(skb)->nr_frags) { + /* The size of the copy buffer will ensure all writes + * are the size of a cache line. 
+ */ + struct efx_short_copy_buffer copy_buf; + + copy_buf.used = 0; + + efx_skb_copy_bits_to_pio(tx_queue->efx, skb, + &piobuf, &copy_buf); + efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); + } else { + /* Pad the write to the size of a cache line. + * We can do this because we know the skb_shared_info struct is + * after the source, and the destination buffer is big enough. + */ + BUILD_BUG_ON(L1_CACHE_BYTES > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + memcpy_toio(tx_queue->piobuf, skb->data, + ALIGN(skb->len, L1_CACHE_BYTES)); + } + + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, + ESF_DZ_TX_PIO_CONT, 0, + ESF_DZ_TX_PIO_BYTE_CNT, skb->len, + ESF_DZ_TX_PIO_BUF_ADDR, + tx_queue->piobuf_offset); + ++tx_queue->pio_packets; + ++tx_queue->insert_count; + return buffer; +} +#endif /* EFX_USE_PIO */ + /* * Add a socket buffer to a TX queue * @@ -167,7 +343,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) struct device *dma_dev = &efx->pci_dev->dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; - unsigned int len, unmap_len = 0, insert_ptr; + unsigned int len, unmap_len = 0; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; unsigned short dma_flags; @@ -189,6 +365,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) return NETDEV_TX_OK; } + /* Consider using PIO for short packets */ +#ifdef EFX_USE_PIO + if (skb->len <= efx_piobuf_size && tx_queue->piobuf && + efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) { + buffer = efx_enqueue_skb_pio(tx_queue, skb); + dma_flags = EFX_TX_BUF_OPTION; + goto finish_packet; + } +#endif + /* Map for DMA. Use dma_map_single rather than dma_map_page * since this is more efficient on machines with sparse * memory. 
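
The PIO copy helpers added above (efx_memcpy_toio_aligned() and efx_memcpy_toio_aligned_cb()) coalesce skb fragments so the write-combining PIO mapping only ever sees full, aligned, cache-line-sized bursts, carrying any short tail in an efx_short_copy_buffer until more data arrives or the packet ends. Below is a minimal user-space sketch of that carry-buffer technique; plain memcpy() stands in for memcpy_toio(), and every name in it is invented for illustration rather than taken from the driver.

#include <stdio.h>
#include <string.h>

#define CACHE_BYTES 64			/* stand-in for L1_CACHE_BYTES */

struct copy_buffer {
	size_t used;
	unsigned char buf[CACHE_BYTES];
};

/* Write the largest CACHE_BYTES multiple straight through; stash the tail. */
static void wc_write_aligned(unsigned char **dst, const unsigned char *data,
			     size_t len, struct copy_buffer *cb)
{
	size_t block = len & ~(size_t)(CACHE_BYTES - 1);

	memcpy(*dst, data, block);	/* memcpy_toio() in the driver */
	*dst += block;
	len -= block;
	if (len) {			/* tail is carried, not written yet */
		memcpy(cb->buf, data + block, len);
		cb->used = len;
	}
}

/* As above, but first top up and drain a partially filled carry buffer. */
static void wc_write(unsigned char **dst, const unsigned char *data,
		     size_t len, struct copy_buffer *cb)
{
	if (cb->used) {
		size_t fill = CACHE_BYTES - cb->used;

		if (fill > len)
			fill = len;
		memcpy(cb->buf + cb->used, data, fill);
		cb->used += fill;
		if (cb->used < CACHE_BYTES)
			return;		/* still short of a full line */
		memcpy(*dst, cb->buf, CACHE_BYTES);
		*dst += CACHE_BYTES;
		data += fill;
		len -= fill;
		cb->used = 0;
	}
	wc_write_aligned(dst, data, len, cb);
}

/* Flush the remainder as one full line; the padding bytes are junk but
 * harmless, because the descriptor carries the real byte count.
 */
static void wc_flush(unsigned char *dst, struct copy_buffer *cb)
{
	if (cb->used)
		memcpy(dst, cb->buf, CACHE_BYTES);
}

int main(void)
{
	unsigned char pio[4 * CACHE_BYTES];	/* stands in for the PIO region */
	unsigned char *p = pio;
	struct copy_buffer cb = { 0, { 0 } };
	unsigned char frag1[100], frag2[45];

	memset(frag1, 0xaa, sizeof(frag1));
	memset(frag2, 0xbb, sizeof(frag2));
	wc_write(&p, frag1, sizeof(frag1), &cb); /* 64 direct, 36 carried */
	wc_write(&p, frag2, sizeof(frag2), &cb); /* drains to 64, carries 17 */
	wc_flush(p, &cb);			 /* padded final line */
	printf("full lines written: %zu\n", (size_t)(p - pio) / CACHE_BYTES);
	return 0;
}

The design point this models is that a write-combining mapping collapses partial or misaligned stores into extra bus transactions, so the driver never lets one reach the BAR.
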
@@ -208,11 +395,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Add to TX queue, splitting across DMA boundaries */ do { - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; - buffer = &tx_queue->buffer[insert_ptr]; - EFX_BUG_ON_PARANOID(buffer->flags); - EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(buffer->unmap_len); + buffer = efx_tx_queue_get_insert_buffer(tx_queue); dma_len = efx_max_tx_len(efx, dma_addr); if (likely(dma_len >= len)) @@ -230,6 +413,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Transfer ownership of the unmapping to the final buffer */ buffer->flags = EFX_TX_BUF_CONT | dma_flags; buffer->unmap_len = unmap_len; + buffer->dma_offset = buffer->dma_addr - unmap_addr; unmap_len = 0; /* Get address and size of next fragment */ @@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) } /* Transfer ownership of the skb to the final buffer */ +finish_packet: buffer->skb = skb; buffer->flags = EFX_TX_BUF_SKB | dma_flags; @@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) while (tx_queue->insert_count != tx_queue->write_count) { unsigned int pkts_compl = 0, bytes_compl = 0; --tx_queue->insert_count; - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; - buffer = &tx_queue->buffer[insert_ptr]; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); } @@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) * @tcp_off: Offset of TCP header * @header_len: Number of bytes of header * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload + * @header_dma_addr: Header DMA address, when using option descriptors + * @header_unmap_len: Header DMA mapped length, or 0 if not using option + * descriptors * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. 
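
The new @dma_offset field used in the hunks above and below exists because efx_dequeue_buffer() previously recovered the unmap address as dma_addr + len - unmap_len, which is only correct when a buffer ends exactly at the end of its DMA mapping; the option-descriptor TSO path maps the whole skb head once and points the header descriptor at the start of that mapping. A toy user-space model of the two formulas follows, with simplified types and invented names (the real fields live in struct efx_tx_buffer):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct tx_buf {
	dma_addr_t dma_addr;	   /* address handed to the hardware */
	unsigned short len;	   /* bytes covered by this descriptor */
	unsigned short unmap_len;  /* length of the whole DMA mapping */
	unsigned short dma_offset; /* dma_addr minus the mapping base */
};

/* Pre-patch recovery of the unmap address: assumes the buffer ends
 * exactly at the end of its mapping.
 */
static dma_addr_t unmap_addr_old(const struct tx_buf *b)
{
	return b->dma_addr + b->len - b->unmap_len;
}

/* Post-patch recovery: correct wherever the buffer sits in the mapping. */
static dma_addr_t unmap_addr_new(const struct tx_buf *b)
{
	return b->dma_addr - b->dma_offset;
}

int main(void)
{
	/* A 54-byte TSO header descriptor pointing at the *start* of a
	 * 200-byte mapping of the skb head (mapping base 0x1000).
	 */
	struct tx_buf hdr = {
		.dma_addr = 0x1000, .len = 54,
		.unmap_len = 200, .dma_offset = 0,
	};

	assert(unmap_addr_new(&hdr) == 0x1000);	/* correct base */
	assert(unmap_addr_old(&hdr) != 0x1000);	/* off by 146 bytes */
	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)unmap_addr_old(&hdr),
	       (unsigned long long)unmap_addr_new(&hdr));
	return 0;
}
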
@@ -636,7 +823,7 @@ struct tso_state { /* Output position */ unsigned out_len; unsigned seqnum; - unsigned ipv4_id; + u16 ipv4_id; unsigned packet_space; /* Input position */ @@ -651,6 +838,8 @@ struct tso_state { unsigned int tcp_off; unsigned header_len; unsigned int ip_base_len; + dma_addr_t header_dma_addr; + unsigned int header_unmap_len; }; @@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; - unsigned dma_len, insert_ptr; + unsigned dma_len; EFX_BUG_ON_PARANOID(len <= 0); while (1) { - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; - buffer = &tx_queue->buffer[insert_ptr]; + buffer = efx_tx_queue_get_insert_buffer(tx_queue); ++tx_queue->insert_count; EFX_BUG_ON_PARANOID(tx_queue->insert_count - tx_queue->read_count >= efx->txq_entries); - EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(buffer->unmap_len); - EFX_BUG_ON_PARANOID(buffer->flags); - buffer->dma_addr = dma_addr; dma_len = efx_max_tx_len(efx, dma_addr); @@ -796,6 +980,7 @@ static int efx_tso_put_header(struct efx_tx_queue *tx_queue, return -ENOMEM; } buffer->unmap_len = buffer->len; + buffer->dma_offset = 0; buffer->flags |= EFX_TX_BUF_MAP_SINGLE; } @@ -814,19 +999,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; - buffer = &tx_queue->buffer[tx_queue->insert_count & - tx_queue->ptr_mask]; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); } } /* Parse the SKB header and initialise state. */ -static void tso_start(struct tso_state *st, const struct sk_buff *skb) +static int tso_start(struct tso_state *st, struct efx_nic *efx, + const struct sk_buff *skb) { + bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int header_len, in_len; + dma_addr_t dma_addr; + st->ip_off = skb_network_header(skb) - skb->data; st->tcp_off = skb_transport_header(skb) - skb->data; - st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + in_len = skb_headlen(skb) - header_len; + st->header_len = header_len; + st->in_len = in_len; if (st->protocol == htons(ETH_P_IP)) { st->ip_base_len = st->header_len - st->ip_off; st->ipv4_id = ntohs(ip_hdr(skb)->id); @@ -840,9 +1033,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); - st->out_len = skb->len - st->header_len; - st->unmap_len = 0; - st->dma_flags = 0; + st->out_len = skb->len - header_len; + + if (!use_options) { + st->header_unmap_len = 0; + + if (likely(in_len == 0)) { + st->dma_flags = 0; + st->unmap_len = 0; + return 0; + } + + dma_addr = dma_map_single(dma_dev, skb->data + header_len, + in_len, DMA_TO_DEVICE); + st->dma_flags = EFX_TX_BUF_MAP_SINGLE; + st->dma_addr = dma_addr; + st->unmap_addr = dma_addr; + st->unmap_len = in_len; + } else { + dma_addr = dma_map_single(dma_dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + st->header_dma_addr = dma_addr; + st->header_unmap_len = skb_headlen(skb); + st->dma_flags = 0; + st->dma_addr = dma_addr + header_len; + st->unmap_len = 0; + } + + return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? 
-ENOMEM : 0; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, @@ -860,24 +1078,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, return -ENOMEM; } -static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, - const struct sk_buff *skb) -{ - int hl = st->header_len; - int len = skb_headlen(skb) - hl; - - st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, - len, DMA_TO_DEVICE); - if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { - st->dma_flags = EFX_TX_BUF_MAP_SINGLE; - st->unmap_len = len; - st->in_len = len; - st->dma_addr = st->unmap_addr; - return 0; - } - return -ENOMEM; -} - /** * tso_fill_packet_with_fragment - form descriptors for the current fragment @@ -922,6 +1122,7 @@ static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, if (st->in_len == 0) { /* Transfer ownership of the DMA mapping */ buffer->unmap_len = st->unmap_len; + buffer->dma_offset = buffer->unmap_len - buffer->len; buffer->flags |= st->dma_flags; st->unmap_len = 0; } @@ -944,55 +1145,98 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, struct tso_state *st) { struct efx_tx_buffer *buffer = - &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; - struct tcphdr *tsoh_th; - unsigned ip_length; - u8 *header; - int rc; + efx_tx_queue_get_insert_buffer(tx_queue); + bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; + u8 tcp_flags_clear; - /* Allocate and insert a DMA-mapped header buffer. */ - header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); - if (!header) - return -ENOMEM; - - tsoh_th = (struct tcphdr *)(header + st->tcp_off); - - /* Copy and update the headers. */ - memcpy(header, skb->data, st->header_len); - - tsoh_th->seq = htonl(st->seqnum); - st->seqnum += skb_shinfo(skb)->gso_size; - if (st->out_len > skb_shinfo(skb)->gso_size) { - /* This packet will not finish the TSO burst. */ + if (!is_last) { st->packet_space = skb_shinfo(skb)->gso_size; - tsoh_th->fin = 0; - tsoh_th->psh = 0; + tcp_flags_clear = 0x09; /* mask out FIN and PSH */ } else { - /* This packet will be the last in the TSO burst. */ st->packet_space = st->out_len; - tsoh_th->fin = tcp_hdr(skb)->fin; - tsoh_th->psh = tcp_hdr(skb)->psh; + tcp_flags_clear = 0x00; } - ip_length = st->ip_base_len + st->packet_space; - if (st->protocol == htons(ETH_P_IP)) { - struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off); + if (!st->header_unmap_len) { + /* Allocate and insert a DMA-mapped header buffer. */ + struct tcphdr *tsoh_th; + unsigned ip_length; + u8 *header; + int rc; + + header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); + if (!header) + return -ENOMEM; - tsoh_iph->tot_len = htons(ip_length); + tsoh_th = (struct tcphdr *)(header + st->tcp_off); + + /* Copy and update the headers. */ + memcpy(header, skb->data, st->header_len); + + tsoh_th->seq = htonl(st->seqnum); + ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear; + + ip_length = st->ip_base_len + st->packet_space; + + if (st->protocol == htons(ETH_P_IP)) { + struct iphdr *tsoh_iph = + (struct iphdr *)(header + st->ip_off); + + tsoh_iph->tot_len = htons(ip_length); + tsoh_iph->id = htons(st->ipv4_id); + } else { + struct ipv6hdr *tsoh_iph = + (struct ipv6hdr *)(header + st->ip_off); + + tsoh_iph->payload_len = htons(ip_length); + } - /* Linux leaves suitable gaps in the IP ID space for us to fill. 
*/ - tsoh_iph->id = htons(st->ipv4_id); - st->ipv4_id++; + rc = efx_tso_put_header(tx_queue, buffer, header); + if (unlikely(rc)) + return rc; } else { - struct ipv6hdr *tsoh_iph = - (struct ipv6hdr *)(header + st->ip_off); + /* Send the original headers with a TSO option descriptor + * in front + */ + u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear; - tsoh_iph->payload_len = htons(ip_length); + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = 0; + buffer->unmap_len = 0; + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, + ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); + ++tx_queue->insert_count; + + /* We mapped the headers in tso_start(). Unmap them + * when the last segment is completed. + */ + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + buffer->dma_addr = st->header_dma_addr; + buffer->len = st->header_len; + if (is_last) { + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; + buffer->unmap_len = st->header_unmap_len; + buffer->dma_offset = 0; + /* Ensure we only unmap them once in case of a + * later DMA mapping error and rollback + */ + st->header_unmap_len = 0; + } else { + buffer->flags = EFX_TX_BUF_CONT; + buffer->unmap_len = 0; + } + ++tx_queue->insert_count; } - rc = efx_tso_put_header(tx_queue, buffer, header); - if (unlikely(rc)) - return rc; + st->seqnum += skb_shinfo(skb)->gso_size; + + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ + ++st->ipv4_id; ++tx_queue->tso_packets; @@ -1023,12 +1267,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); - tso_start(&state, skb); + rc = tso_start(&state, efx, skb); + if (rc) + goto mem_err; - /* Assume that skb header area contains exactly the headers, and - * all payload is in the frag list. - */ - if (skb_headlen(skb) == state.header_len) { + if (likely(state.in_len == 0)) { /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; @@ -1037,9 +1280,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, if (rc) goto mem_err; } else { - rc = tso_get_head_fragment(&state, efx, skb); - if (rc) - goto mem_err; + /* Payload starts in the header area. 
*/ frag_i = -1; } @@ -1091,6 +1332,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, state.unmap_len, DMA_TO_DEVICE); } + /* Free the header DMA mapping, if using option descriptors */ + if (state.header_unmap_len) + dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, + state.header_unmap_len, DMA_TO_DEVICE); + efx_enqueue_unwind(tx_queue); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 770036bc2d8..513ed8b1ba5 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev) dev->watchdog_timeo = timeout; dev->irq = MACE_ETHERNET_IRQ; dev->base_addr = (unsigned long)&mace->eth; - memcpy(dev->dev_addr, o2meth_eaddr, 6); + memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); priv = netdev_priv(dev); spin_lock_init(&priv->meth_lock); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index ee18e6f7b4f..acbbe48a519 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev) cancel_work_sync(&tp->phy_task); unregister_netdev(dev); sis190_release_board(pdev); - pci_set_drvdata(pdev, NULL); } static struct pci_driver sis190_pci_driver = { diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 068fc44d37e..753630f5d3d 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_SMSC bool "SMC (SMSC)/Western Digital devices" default y depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \ - BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA + BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -39,7 +39,7 @@ config SMC91X select CRC32 select MII depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ - MN10300 || COLDFIRE || ARM64) + MN10300 || COLDFIRE || ARM64 || XTENSA) ---help--- This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 03b256af7ed..8ae1f8a7bf3 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -91,9 +91,9 @@ static int rx_copybreak; /* These identify the driver base version and may not be removed. 
*/ static char version[] = -DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n"; +DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>"; static char version2[] = -" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; +" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver"); @@ -332,9 +332,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* when built into the kernel, we only print version if device is found */ #ifndef MODULE - static int printed_version; - if (!printed_version++) - printk(KERN_INFO "%s%s", version, version2); + pr_info_once("%s%s\n", version, version2); #endif card_idx++; @@ -423,9 +421,9 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4)); if (debug > 2) { - dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); + dev_dbg(&pdev->dev, "EEPROM contents:\n"); for (i = 0; i < 64; i++) - printk(" %4.4x%s", read_eeprom(ep, i), + pr_cont(" %4.4x%s", read_eeprom(ep, i), i % 16 == 15 ? "\n" : ""); } @@ -490,10 +488,10 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n", - dev->name, pci_id_tbl[chip_idx].name, - (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq, - dev->dev_addr); + netdev_info(dev, "%s at %lx, IRQ %d, %pM\n", + pci_id_tbl[chip_idx].name, + (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq, + dev->dev_addr); out: return ret; @@ -703,9 +701,8 @@ static int epic_open(struct net_device *dev) mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); if (dev->if_port == 1) { if (debug > 1) - printk(KERN_INFO "%s: Using the 10base2 transceiver, MII " - "status %4.4x.\n", - dev->name, mdio_read(dev, ep->phys[0], MII_BMSR)); + netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n", + mdio_read(dev, ep->phys[0], MII_BMSR)); } } else { int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); @@ -715,10 +712,10 @@ static int epic_open(struct net_device *dev) else if (! (mii_lpa & LPA_LPACK)) mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); if (debug > 1) - printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d" - " register read of %4.4x.\n", dev->name, - ep->mii.full_duplex ? "full" : "half", - ep->phys[0], mii_lpa); + netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n", + ep->mii.full_duplex ? "full" + : "half", + ep->phys[0], mii_lpa); } } @@ -738,10 +735,9 @@ static int epic_open(struct net_device *dev) TxUnderrun); if (debug > 1) { - printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d " - "status %4.4x %s-duplex.\n", - dev->name, ioaddr, irq, er32(GENCTL), - ep->mii.full_duplex ? "full" : "half"); + netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n", + ioaddr, irq, er32(GENCTL), + ep->mii.full_duplex ? "full" : "half"); } /* Set the timer to switch to check for link beat and perhaps switch @@ -790,8 +786,8 @@ static void epic_restart(struct net_device *dev) /* Soft reset the chip. 
*/ ew32(GENCTL, 0x4001); - printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", - dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); + netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", + ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); udelay(1); /* This magic is documented in SMSC app note 7.15 */ @@ -827,9 +823,8 @@ static void epic_restart(struct net_device *dev) ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | TxUnderrun); - printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" - " interrupt %4.4x.\n", - dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT)); + netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n", + er32(COMMAND), er32(GENCTL), er32(INTSTAT)); } static void check_media(struct net_device *dev) @@ -846,9 +841,9 @@ static void check_media(struct net_device *dev) return; if (ep->mii.full_duplex != duplex) { ep->mii.full_duplex = duplex; - printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" - " partner capability of %4.4x.\n", dev->name, - ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); + netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n", + ep->mii.full_duplex ? "full" : "half", + ep->phys[0], mii_lpa); ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); } } @@ -861,11 +856,10 @@ static void epic_timer(unsigned long data) int next_tick = 5*HZ; if (debug > 3) { - printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", - dev->name, er32(TxSTAT)); - printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " - "IntStatus %4.4x RxStatus %4.4x.\n", dev->name, - er32(INTMASK), er32(INTSTAT), er32(RxSTAT)); + netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n", + er32(TxSTAT)); + netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n", + er32(INTMASK), er32(INTSTAT), er32(RxSTAT)); } check_media(dev); @@ -880,11 +874,11 @@ static void epic_tx_timeout(struct net_device *dev) void __iomem *ioaddr = ep->ioaddr; if (debug > 0) { - printk(KERN_WARNING "%s: Transmit timeout using MII device, " - "Tx status %4.4x.\n", dev->name, er16(TxSTAT)); + netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n", + er16(TxSTAT)); if (debug > 1) { - printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", - dev->name, ep->dirty_tx, ep->cur_tx); + netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n", + ep->dirty_tx, ep->cur_tx); } } if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */ @@ -994,9 +988,8 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) ew32(COMMAND, TxQueued); if (debug > 4) - printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " - "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len, - entry, ctrl_word, er32(TxSTAT)); + netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n", + skb->len, entry, ctrl_word, er32(TxSTAT)); return NETDEV_TX_OK; } @@ -1009,8 +1002,8 @@ static void epic_tx_error(struct net_device *dev, struct epic_private *ep, #ifndef final_version /* There was an major error, log it. 
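Editor's note: the epic100 conversions above all follow one shape: drop the hand-rolled "%s: " / dev->name pair and let the netdev_* helpers derive the prefix from the struct net_device. A hedged before/after sketch; demo_log_restart is hypothetical.

        #include <linux/netdevice.h>

        static void demo_log_restart(struct net_device *dev, int cur_rx,
                                     int dirty_rx)
        {
                /* old style: device name formatted by hand */
                printk(KERN_DEBUG "%s: Restarting the chip, Rx %d/%d.\n",
                       dev->name, cur_rx, dirty_rx);

                /* new style: netdev_dbg() emits "driver bus-id ifname:"
                 * by itself, so the format string starts at the payload */
                netdev_dbg(dev, "Restarting the chip, Rx %d/%d.\n",
                           cur_rx, dirty_rx);
        }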
*/ if (debug > 1) - printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", - dev->name, status); + netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n", + status); #endif stats->tx_errors++; if (status & 0x1050) @@ -1057,9 +1050,8 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) #ifndef final_version if (cur_tx - dirty_tx > TX_RING_SIZE) { - printk(KERN_WARNING - "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", - dev->name, dirty_tx, cur_tx, ep->tx_full); + netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dirty_tx, cur_tx, ep->tx_full); dirty_tx += TX_RING_SIZE; } #endif @@ -1086,8 +1078,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) ew32(INTSTAT, status & EpicNormalEvent); if (debug > 4) { - printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " - "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT)); + netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n", + status, er32(INTSTAT)); } if ((status & IntrSummary) == 0) @@ -1125,8 +1117,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) ew32(COMMAND, RestartTx); } if (status & PCIBusErr170) { - printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", - dev->name, status); + netdev_err(dev, "PCI Bus Error! status %4.4x.\n", + status); epic_pause(dev); epic_restart(dev); } @@ -1136,8 +1128,8 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) out: if (debug > 3) { - printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n", - dev->name, status); + netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n", + status); } return IRQ_RETVAL(handled); @@ -1151,7 +1143,7 @@ static int epic_rx(struct net_device *dev, int budget) int work_done = 0; if (debug > 4) - printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry, + netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry, ep->rx_ring[entry].rxstatus); if (rx_work_limit > budget) @@ -1162,16 +1154,17 @@ static int epic_rx(struct net_device *dev, int budget) int status = ep->rx_ring[entry].rxstatus; if (debug > 4) - printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status); + netdev_dbg(dev, " epic_rx() status was %8.8x.\n", + status); if (--rx_work_limit < 0) break; if (status & 0x2006) { if (debug > 2) - printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n", - dev->name, status); + netdev_dbg(dev, "epic_rx() error status was %8.8x.\n", + status); if (status & 0x2000) { - printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " - "multiple buffers, status %4.4x!\n", dev->name, status); + netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n", + status); dev->stats.rx_length_errors++; } else if (status & 0x0006) /* Rx Frame errors are counted in hardware. 
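Editor's note: the level mapping used above, and throughout this series, is mechanical: KERN_ERR becomes netdev_err(), KERN_WARNING becomes netdev_warn(), KERN_INFO becomes netdev_info(), KERN_DEBUG becomes netdev_dbg(). The one behavioural nuance is that netdev_dbg() only emits when DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, whereas printk(KERN_DEBUG ...) always printed; epic100's calls additionally stay behind the driver's own debug level checks, as before. Hypothetical call sites:

        #include <linux/netdevice.h>

        static void demo_levels(struct net_device *dev, int err, int val)
        {
                netdev_err(dev,  "fatal: %d\n", err); /* was KERN_ERR */
                netdev_warn(dev, "odd: %d\n", val);   /* was KERN_WARNING */
                netdev_info(dev, "link up\n");        /* was KERN_INFO */
                netdev_dbg(dev,  "trace\n");          /* was KERN_DEBUG; now
                                                       * needs DEBUG or
                                                       * CONFIG_DYNAMIC_DEBUG */
        }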
*/ @@ -1183,9 +1176,8 @@ static int epic_rx(struct net_device *dev, int budget) struct sk_buff *skb; if (pkt_len > PKT_BUF_SZ - 4) { - printk(KERN_ERR "%s: Oversized Ethernet frame, status %x " - "%d bytes.\n", - dev->name, status, pkt_len); + netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n", + status, pkt_len); pkt_len = 1514; } /* Check if the packet is long enough to accept without copying @@ -1305,8 +1297,8 @@ static int epic_close(struct net_device *dev) napi_disable(&ep->napi); if (debug > 1) - printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, er32(INTSTAT)); + netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n", + er32(INTSTAT)); del_timer_sync(&ep->timer); @@ -1324,7 +1316,7 @@ static int epic_close(struct net_device *dev) ep->rx_ring[i].buflength = 0; if (skb) { pci_unmap_single(pdev, ep->rx_ring[i].bufaddr, - ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + ep->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); } ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ @@ -1535,7 +1527,6 @@ static void epic_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); free_netdev(dev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); /* pci_power_off(pdev, -1); */ } @@ -1588,8 +1579,7 @@ static int __init epic_init (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE - printk (KERN_INFO "%s%s", - version, version2); + pr_info("%s%s\n", version, version2); #endif return pci_register_driver(&epic_driver); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index afe01c4088a..0f096a89005 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -106,16 +106,16 @@ MODULE_ALIAS("platform:smc911x"); #define POWER_DOWN 1 #if SMC_DEBUG > 0 -#define DBG(n, args...) \ +#define DBG(n, dev, args...) \ do { \ if (SMC_DEBUG & (n)) \ - printk(args); \ + netdev_dbg(dev, args); \ } while (0) -#define PRINTK(args...) printk(args) +#define PRINTK(dev, args...) netdev_info(dev, args) #else -#define DBG(n, args...) do { } while (0) -#define PRINTK(args...) printk(KERN_DEBUG args) +#define DBG(n, dev, args...) do { } while (0) +#define PRINTK(dev, args...) netdev_dbg(dev, args) #endif #if SMC_DEBUG_PKTS > 0 @@ -130,21 +130,23 @@ static void PRINT_PKT(u_char *buf, int length) for (i = 0; i < lines ; i ++) { int cur; + printk(KERN_DEBUG); for (cur = 0; cur < 8; cur++) { u_char a, b; a = *buf++; b = *buf++; - printk("%02x%02x ", a, b); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); } + printk(KERN_DEBUG); for (i = 0; i < remainder/2 ; i++) { u_char a, b; a = *buf++; b = *buf++; - printk("%02x%02x ", a, b); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); } #else #define PRINT_PKT(x...) 
do { } while (0) @@ -176,7 +178,7 @@ static void smc911x_reset(struct net_device *dev) unsigned int reg, timeout=0, resets=1, irq_cfg; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* Take out of PM setting first */ if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { @@ -188,7 +190,7 @@ static void smc911x_reset(struct net_device *dev) reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; } while (--timeout && !reg); if (timeout == 0) { - PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name); + PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n"); return; } } @@ -206,14 +208,14 @@ static void smc911x_reset(struct net_device *dev) reg = SMC_GET_HW_CFG(lp); /* If chip indicates reset timeout then try again */ if (reg & HW_CFG_SRST_TO_) { - PRINTK("%s: chip reset timeout, retrying...\n", dev->name); + PRINTK(dev, "chip reset timeout, retrying...\n"); resets++; break; } } while (--timeout && (reg & HW_CFG_SRST_)); } if (timeout == 0) { - PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name); + PRINTK(dev, "smc911x_reset timeout waiting for reset\n"); return; } @@ -223,7 +225,7 @@ static void smc911x_reset(struct net_device *dev) udelay(10); if (timeout == 0){ - PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name); + PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n"); return; } @@ -270,7 +272,7 @@ static void smc911x_enable(struct net_device *dev) unsigned mask, cfg, cr; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); @@ -296,7 +298,7 @@ static void smc911x_enable(struct net_device *dev) /* Turn on receiver and enable RX */ if (cr & MAC_CR_RXEN_) - DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name); + DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n"); SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); @@ -327,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev) unsigned cr; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__); + DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__); /* Disable IRQ's */ SMC_SET_INT_EN(lp, 0); @@ -346,7 +348,8 @@ static inline void smc911x_drop_pkt(struct net_device *dev) struct smc911x_local *lp = netdev_priv(dev); unsigned int fifo_count, timeout, reg; - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__); + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n", + CARDNAME, __func__); fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; if (fifo_count <= 4) { /* Manually dump the packet data */ @@ -361,7 +364,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev) reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; } while (--timeout && reg); if (timeout == 0) { - PRINTK("%s: timeout waiting for RX fast forward\n", dev->name); + PRINTK(dev, "timeout waiting for RX fast forward\n"); } } } @@ -379,11 +382,11 @@ static inline void smc911x_rcv(struct net_device *dev) struct sk_buff *skb; unsigned char *data; - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", - dev->name, __func__); + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n", + __func__); status = SMC_GET_RX_STS_FIFO(lp); - DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n", - dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); + DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n", + (status & 0x3fff0000) >> 16, status & 0xc000ffff); pkt_len = (status 
& RX_STS_PKT_LEN_) >> 16; if (status & RX_STS_ES_) { /* Deal with a bad packet */ @@ -403,8 +406,7 @@ static inline void smc911x_rcv(struct net_device *dev) /* Alloc a buffer with extra room for DMA alignment */ skb = netdev_alloc_skb(dev, pkt_len+32); if (unlikely(skb == NULL)) { - PRINTK( "%s: Low memory, rcvd packet dropped.\n", - dev->name); + PRINTK(dev, "Low memory, rcvd packet dropped.\n"); dev->stats.rx_dropped++; smc911x_drop_pkt(dev); return; @@ -422,8 +424,8 @@ static inline void smc911x_rcv(struct net_device *dev) /* Lower the FIFO threshold if possible */ fifo = SMC_GET_FIFO_INT(lp); if (fifo & 0xFF) fifo--; - DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n", - dev->name, fifo & 0xff); + DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n", + fifo & 0xff); SMC_SET_FIFO_INT(lp, fifo); /* Setup RX DMA */ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); @@ -436,7 +438,7 @@ static inline void smc911x_rcv(struct net_device *dev) SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); SMC_PULL_DATA(lp, data, pkt_len+2+3); - DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name); + DBG(SMC_DEBUG_PKTS, dev, "Received packet\n"); PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); @@ -456,7 +458,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) unsigned int cmdA, cmdB, len; unsigned char *buf; - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); BUG_ON(lp->pending_tx_skb == NULL); skb = lp->pending_tx_skb; @@ -481,12 +483,12 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) /* tag is packet length so we can use this in stats update later */ cmdB = (skb->len << 16) | (skb->len & 0x7FF); - DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", - dev->name, len, len, buf, cmdA, cmdB); + DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", + len, len, buf, cmdA, cmdB); SMC_SET_TX_FIFO(lp, cmdA); SMC_SET_TX_FIFO(lp, cmdB); - DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name); + DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); PRINT_PKT(buf, len <= 64 ? 
len : 64); /* Send pkt via PIO or DMA */ @@ -517,20 +519,20 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int free; unsigned long flags; - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", - dev->name, __func__); + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", + __func__); spin_lock_irqsave(&lp->lock, flags); BUG_ON(lp->pending_tx_skb != NULL); free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; - DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free); + DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free); /* Turn off the flow when running out of space in FIFO */ if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { - DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n", - dev->name, free); + DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n", + free); /* Reenable when at least 1 packet of size MTU present */ SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); lp->tx_throttle = 1; @@ -545,8 +547,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * End padding 15 bytes */ if (unlikely(free < (skb->len + 8 + 15 + 15))) { - printk("%s: No Tx free space %d < %d\n", - dev->name, free, skb->len); + netdev_warn(dev, "No Tx free space %d < %d\n", + free, skb->len); lp->pending_tx_skb = NULL; dev->stats.tx_errors++; dev->stats.tx_dropped++; @@ -561,13 +563,13 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * the DMA IRQ starts it */ if (lp->txdma_active) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name); + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n"); lp->pending_tx_skb = skb; netif_stop_queue(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name); + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n"); lp->txdma_active = 1; } } @@ -589,20 +591,19 @@ static void smc911x_tx(struct net_device *dev) struct smc911x_local *lp = netdev_priv(dev); unsigned int tx_status; - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", - dev->name, __func__); + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", + __func__); /* Collect the TX status */ while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { - DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n", - dev->name, - (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); + DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n", + (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); tx_status = SMC_GET_TX_STS_FIFO(lp); dev->stats.tx_packets++; dev->stats.tx_bytes+=tx_status>>16; - DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", - dev->name, (tx_status & 0xffff0000) >> 16, - tx_status & 0x0000ffff); + DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n", + (tx_status & 0xffff0000) >> 16, + tx_status & 0x0000ffff); /* count Tx errors, but ignore lost carrier errors when in * full-duplex mode */ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && @@ -640,8 +641,8 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) SMC_GET_MII(lp, phyreg, phyaddr, phydata); - DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", - __func__, phyaddr, phyreg, phydata); + DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", + __func__, phyaddr, phyreg, phydata); return phydata; } @@ -654,8 +655,8 @@ static void smc911x_phy_write(struct 
net_device *dev, int phyaddr, int phyreg, { struct smc911x_local *lp = netdev_priv(dev); - DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); + DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); SMC_SET_MII(lp, phyreg, phyaddr, phydata); } @@ -670,7 +671,7 @@ static void smc911x_phy_detect(struct net_device *dev) int phyaddr; unsigned int cfg, id1, id2; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); lp->phy_type = 0; @@ -731,8 +732,8 @@ static void smc911x_phy_detect(struct net_device *dev) lp->phy_type = id1 << 16 | id2; } - DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n", - dev->name, id1, id2, lp->mii.phy_id); + DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n", + id1, id2, lp->mii.phy_id); } /* @@ -745,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev) int phyaddr = lp->mii.phy_id; int bmcr; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* Enter Link Disable state */ SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); @@ -792,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy) unsigned long flags; unsigned int reg; - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); spin_lock_irqsave(&lp->lock, flags); reg = SMC_GET_PMT_CTRL(lp); @@ -851,18 +852,18 @@ static void smc911x_phy_check_media(struct net_device *dev, int init) int phyaddr = lp->mii.phy_id; unsigned int bmcr, cr; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { /* duplex state has changed */ SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); SMC_GET_MAC_CR(lp, cr); if (lp->mii.full_duplex) { - DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name); + DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n"); bmcr |= BMCR_FULLDPLX; cr |= MAC_CR_RCVOWN_; } else { - DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name); + DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n"); bmcr &= ~BMCR_FULLDPLX; cr &= ~MAC_CR_RCVOWN_; } @@ -891,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work) int status; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); /* * We should not be called if phy_type is zero. 
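Editor's note: every smc911x site above goes through the driver's DBG() wrapper, which this patch reworks to take the net_device explicitly so the output gains the standard device prefix. Abridged from the macro hunk earlier in this file (the SMC_DEBUG fallback is an assumption for the sketch):

        #include <linux/netdevice.h>

        #ifndef SMC_DEBUG
        #define SMC_DEBUG 1                     /* assumption for this sketch */
        #endif

        #if SMC_DEBUG > 0
        #define DBG(n, dev, args...)                    \
                do {                                    \
                        if (SMC_DEBUG & (n))            \
                                netdev_dbg(dev, args);  \
                } while (0)
        #else
        #define DBG(n, dev, args...) do { } while (0)
        #endif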
@@ -900,7 +901,7 @@ static void smc911x_phy_configure(struct work_struct *work) return; if (smc911x_phy_reset(dev, phyaddr)) { - printk("%s: PHY reset timed out\n", dev->name); + netdev_info(dev, "PHY reset timed out\n"); return; } spin_lock_irqsave(&lp->lock, flags); @@ -922,7 +923,7 @@ static void smc911x_phy_configure(struct work_struct *work) /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps); if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { - printk(KERN_INFO "Auto negotiation NOT supported\n"); + netdev_info(dev, "Auto negotiation NOT supported\n"); smc911x_phy_fixed(dev); goto smc911x_phy_configure_exit; } @@ -960,8 +961,8 @@ static void smc911x_phy_configure(struct work_struct *work) udelay(10); SMC_GET_PHY_MII_ADV(lp, phyaddr, status); - DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps); - DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps); + DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps); + DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps); /* Restart auto-negotiation process in order to advertise my caps */ SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); @@ -984,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev) int phyaddr = lp->mii.phy_id; int status; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); if (lp->phy_type == 0) return; @@ -992,10 +993,10 @@ static void smc911x_phy_interrupt(struct net_device *dev) smc911x_phy_check_media(dev, 0); /* read to clear status bits */ SMC_GET_PHY_INT_SRC(lp, phyaddr,status); - DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n", - dev->name, status & 0xffff); - DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n", - dev->name, SMC_GET_AFC_CFG(lp)); + DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n", + status & 0xffff); + DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n", + SMC_GET_AFC_CFG(lp)); } /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ @@ -1012,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) unsigned int rx_overrun=0, cr, pkts; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); @@ -1033,8 +1034,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) do { status = SMC_GET_INT(lp); - DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", - dev->name, status, mask, status & ~mask); + DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", + status, mask, status & ~mask); status &= mask; if (!status) @@ -1066,7 +1067,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) SMC_GET_MAC_CR(lp, cr); cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(lp, cr); - DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); + DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); dev->stats.rx_errors++; dev->stats.rx_fifo_errors++; } @@ -1078,7 +1079,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(lp, cr); rx_overrun=1; - DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); + DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); dev->stats.rx_errors++; dev->stats.rx_fifo_errors++; } @@ -1087,23 +1088,23 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* Handle receive condition */ if ((status & INT_STS_RSFL_) || rx_overrun) { unsigned int fifo; - DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name); + 
DBG(SMC_DEBUG_RX, dev, "RX irq\n"); fifo = SMC_GET_RX_FIFO_INF(lp); pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; - DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n", - dev->name, pkts, fifo & 0xFFFF ); + DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n", + pkts, fifo & 0xFFFF); if (pkts != 0) { #ifdef SMC_USE_DMA unsigned int fifo; if (lp->rxdma_active){ - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, - "%s: RX DMA active\n", dev->name); + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, + "RX DMA active\n"); /* The DMA is already running so up the IRQ threshold */ fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; fifo |= pkts & 0xFF; - DBG(SMC_DEBUG_RX, - "%s: Setting RX stat FIFO threshold to %d\n", - dev->name, fifo & 0xff); + DBG(SMC_DEBUG_RX, dev, + "Setting RX stat FIFO threshold to %d\n", + fifo & 0xff); SMC_SET_FIFO_INT(lp, fifo); } else #endif @@ -1113,7 +1114,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) } /* Handle transmit FIFO available */ if (status & INT_STS_TDFA_) { - DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name); + DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n"); SMC_SET_FIFO_TDA(lp, 0xFF); lp->tx_throttle = 0; #ifdef SMC_USE_DMA @@ -1125,9 +1126,9 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* Handle transmit done condition */ #if 1 if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { - DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, - "%s: Tx stat FIFO limit (%d) /GPT irq\n", - dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); + DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev, + "Tx stat FIFO limit (%d) /GPT irq\n", + (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); smc911x_tx(dev); SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); SMC_ACK_INT(lp, INT_STS_TSFL_); @@ -1135,23 +1136,20 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) } #else if (status & INT_STS_TSFL_) { - DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, ); + DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?); smc911x_tx(dev); SMC_ACK_INT(lp, INT_STS_TSFL_); } if (status & INT_STS_GPT_INT_) { - DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", - dev->name, - SMC_GET_IRQ_CFG(lp), - SMC_GET_FIFO_INT(lp), - SMC_GET_RX_CFG(lp)); - DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x " - "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", - dev->name, - (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, - SMC_GET_RX_FIFO_INF(lp) & 0xffff, - SMC_GET_RX_STS_FIFO_PEEK(lp)); + DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", + SMC_GET_IRQ_CFG(lp), + SMC_GET_FIFO_INT(lp), + SMC_GET_RX_CFG(lp)); + DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", + (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, + SMC_GET_RX_FIFO_INF(lp) & 0xffff, + SMC_GET_RX_STS_FIFO_PEEK(lp)); SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); SMC_ACK_INT(lp, INT_STS_GPT_INT_); } @@ -1159,7 +1157,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* Handle PHY interrupt condition */ if (status & INT_STS_PHY_INT_) { - DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name); + DBG(SMC_DEBUG_MISC, dev, "PHY irq\n"); smc911x_phy_interrupt(dev); SMC_ACK_INT(lp, INT_STS_PHY_INT_); } @@ -1168,8 +1166,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* restore mask state */ SMC_SET_INT_EN(lp, mask); - DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n", - dev->name, 8-timeout); + DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n", + 8-timeout); spin_unlock_irqrestore(&lp->lock, flags); @@ -1185,9 
+1183,9 @@ smc911x_tx_dma_irq(int dma, void *data) struct sk_buff *skb = lp->current_tx_skb; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); /* Clear the DMA interrupt sources */ SMC_DMA_ACK_IRQ(dev, dma); BUG_ON(skb == NULL); @@ -1198,8 +1196,8 @@ smc911x_tx_dma_irq(int dma, void *data) if (lp->pending_tx_skb != NULL) smc911x_hardware_send_pkt(dev); else { - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, - "%s: No pending Tx packets. DMA disabled\n", dev->name); + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, + "No pending Tx packets. DMA disabled\n"); spin_lock_irqsave(&lp->lock, flags); lp->txdma_active = 0; if (!lp->tx_throttle) { @@ -1208,8 +1206,8 @@ smc911x_tx_dma_irq(int dma, void *data) spin_unlock_irqrestore(&lp->lock, flags); } - DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, - "%s: TX DMA irq completed\n", dev->name); + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, + "TX DMA irq completed\n"); } static void smc911x_rx_dma_irq(int dma, void *data) @@ -1221,8 +1219,8 @@ smc911x_rx_dma_irq(int dma, void *data) unsigned long flags; unsigned int pkts; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); /* Clear the DMA interrupt sources */ SMC_DMA_ACK_IRQ(dev, dma); dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); @@ -1242,9 +1240,9 @@ smc911x_rx_dma_irq(int dma, void *data) lp->rxdma_active = 0; } spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, - "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n", - dev->name, pkts); + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, + "RX DMA irq completed. 
DMA RX FIFO PKTS %d\n", + pkts); } #endif /* SMC_USE_DMA */ @@ -1268,14 +1266,14 @@ static void smc911x_timeout(struct net_device *dev) int status, mask; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); status = SMC_GET_INT(lp); mask = SMC_GET_INT_EN(lp); spin_unlock_irqrestore(&lp->lock, flags); - DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n", - dev->name, status, mask); + DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n", + status, mask); /* Dump the current TX FIFO contents and restart */ mask = SMC_GET_TX_CFG(lp); @@ -1306,7 +1304,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) unsigned int mcr, update_multicast = 0; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); spin_lock_irqsave(&lp->lock, flags); SMC_GET_MAC_CR(lp, mcr); @@ -1314,7 +1312,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) if (dev->flags & IFF_PROMISC) { - DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name); + DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n"); mcr |= MAC_CR_PRMS_; } /* @@ -1323,7 +1321,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) * checked before the table is */ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { - DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name); + DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n"); mcr |= MAC_CR_MCPAS_; } @@ -1363,8 +1361,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) /* now, the table can be loaded into the chipset */ update_multicast = 1; } else { - DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n", - dev->name); + DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n"); mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); /* @@ -1378,9 +1375,9 @@ static void smc911x_set_multicast_list(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); SMC_SET_MAC_CR(lp, mcr); if (update_multicast) { - DBG(SMC_DEBUG_MISC, - "%s: update mcast hash table 0x%08x 0x%08x\n", - dev->name, multicast_table[0], multicast_table[1]); + DBG(SMC_DEBUG_MISC, dev, + "update mcast hash table 0x%08x 0x%08x\n", + multicast_table[0], multicast_table[1]); SMC_SET_HASHL(lp, multicast_table[0]); SMC_SET_HASHH(lp, multicast_table[1]); } @@ -1398,7 +1395,7 @@ smc911x_open(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* reset the hardware */ smc911x_reset(dev); @@ -1425,7 +1422,7 @@ static int smc911x_close(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); netif_stop_queue(dev); netif_carrier_off(dev); @@ -1459,7 +1456,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) int ret, status; unsigned long flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; @@ -1597,16 +1594,16 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev) e2p_cmd = SMC_GET_E2P_CMD(lp); for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { - PRINTK("%s: %s timeout waiting for EEPROM to respond\n", - dev->name, __func__); + PRINTK(dev, "%s timeout waiting for EEPROM to respond\n", + 
__func__); return -EFAULT; } mdelay(1); e2p_cmd = SMC_GET_E2P_CMD(lp); } if (timeout == 0) { - PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", - dev->name, __func__); + PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n", + __func__); return -ETIMEDOUT; } return 0; @@ -1719,7 +1716,7 @@ static int smc911x_findirq(struct net_device *dev) int timeout = 20; unsigned long cookie; - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); cookie = probe_irq_on(); @@ -1799,13 +1796,14 @@ static int smc911x_probe(struct net_device *dev) const char *version_string; unsigned long irq_flags; - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); /* First, see if the endian word is recognized */ val = SMC_GET_BYTE_TEST(lp); - DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); + DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n", + CARDNAME, val); if (val != 0x87654321) { - printk(KERN_ERR "Invalid chip endian 0x%08x\n",val); + netdev_err(dev, "Invalid chip endian 0x%08x\n", val); retval = -ENODEV; goto err_out; } @@ -1816,26 +1814,29 @@ static int smc911x_probe(struct net_device *dev) * as future revisions could be added. */ chip_id = SMC_GET_PN(lp); - DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id); + DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n", + CARDNAME, chip_id); for(i=0;chip_ids[i].id != 0; i++) { if (chip_ids[i].id == chip_id) break; } if (!chip_ids[i].id) { - printk(KERN_ERR "Unknown chip ID %04x\n", chip_id); + netdev_err(dev, "Unknown chip ID %04x\n", chip_id); retval = -ENODEV; goto err_out; } version_string = chip_ids[i].name; revision = SMC_GET_REV(lp); - DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision); + DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision); /* At this point I'll assume that the chip is an SMC911x. */ - DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name); + DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n", + CARDNAME, chip_ids[i].name); /* Validate the TX FIFO size requested */ if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { - printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb); + netdev_err(dev, "Invalid TX FIFO size requested %d\n", + tx_fifo_kb); retval = -EINVAL; goto err_out; } @@ -1887,14 +1888,13 @@ static int smc911x_probe(struct net_device *dev) case 14:/* 1920 Rx Data Fifo Size */ lp->afc_cfg=0x0006032F;break; default: - PRINTK("%s: ERROR -- no AFC_CFG setting found", - dev->name); + PRINTK(dev, "ERROR -- no AFC_CFG setting found"); break; } - DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, - "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, - lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); + DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev, + "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, + lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); spin_lock_init(&lp->lock); @@ -1924,8 +1924,7 @@ static int smc911x_probe(struct net_device *dev) } } if (dev->irq == 0) { - printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", - dev->name); + netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n"); retval = -ENODEV; goto err_out; } @@ -1980,33 +1979,32 @@ static int smc911x_probe(struct net_device *dev) retval = register_netdev(dev); if (retval == 0) { /* now, print out the card info, in a short format.. 
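Editor's note: the EEPROM-timeout messages above use the companion PRINTK() wrapper, which the same macro hunk also remaps: it is promoted to netdev_info() when debugging is compiled in, and falls back to netdev_dbg() otherwise. As defined by this patch for smc911x (smc91x gets the analogous definitions, but its DBG() tests SMC_DEBUG >= (n)):

        #include <linux/netdevice.h>

        #if SMC_DEBUG > 0
        #define PRINTK(dev, args...)    netdev_info(dev, args)
        #else
        #define PRINTK(dev, args...)    netdev_dbg(dev, args)
        #endif

        /* usage, as in the EEPROM wait loop above:
         * PRINTK(dev, "%s timeout waiting for EEPROM to respond\n", __func__);
         */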
*/ - printk("%s: %s (rev %d) at %#lx IRQ %d", - dev->name, version_string, lp->revision, - dev->base_addr, dev->irq); + netdev_info(dev, "%s (rev %d) at %#lx IRQ %d", + version_string, lp->revision, + dev->base_addr, dev->irq); #ifdef SMC_USE_DMA if (lp->rxdma != -1) - printk(" RXDMA %d ", lp->rxdma); + pr_cont(" RXDMA %d", lp->rxdma); if (lp->txdma != -1) - printk("TXDMA %d", lp->txdma); + pr_cont(" TXDMA %d", lp->txdma); #endif - printk("\n"); + pr_cont("\n"); if (!is_valid_ether_addr(dev->dev_addr)) { - printk("%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", dev->name); + netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n"); } else { /* Print the Ethernet address */ - printk("%s: Ethernet addr: %pM\n", - dev->name, dev->dev_addr); + netdev_info(dev, "Ethernet addr: %pM\n", + dev->dev_addr); } if (lp->phy_type == 0) { - PRINTK("%s: No PHY found\n", dev->name); + PRINTK(dev, "No PHY found\n"); } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { - PRINTK("%s: LAN911x Internal PHY\n", dev->name); + PRINTK(dev, "LAN911x Internal PHY\n"); } else { - PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type); + PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type); } } @@ -2025,7 +2023,7 @@ err_out: } /* - * smc911x_init(void) + * smc911x_drv_probe(void) * * Output: * 0 --> there is a device @@ -2039,6 +2037,7 @@ static int smc911x_drv_probe(struct platform_device *pdev) void __iomem *addr; int ret; + /* ndev is not valid yet, so avoid passing it in. */ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { @@ -2093,7 +2092,7 @@ release_both: release_1: release_mem_region(res->start, SMC911X_IO_EXTENT); out: - printk("%s: not found (%d).\n", CARDNAME, ret); + pr_info("%s: not found (%d).\n", CARDNAME, ret); } #ifdef SMC_USE_DMA else { @@ -2111,7 +2110,7 @@ static int smc911x_drv_remove(struct platform_device *pdev) struct smc911x_local *lp = netdev_priv(ndev); struct resource *res; - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); unregister_netdev(ndev); @@ -2140,7 +2139,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) struct net_device *ndev = platform_get_drvdata(dev); struct smc911x_local *lp = netdev_priv(ndev); - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); if (ndev) { if (netif_running(ndev)) { netif_device_detach(ndev); @@ -2158,7 +2157,7 @@ static int smc911x_drv_resume(struct platform_device *dev) { struct net_device *ndev = platform_get_drvdata(dev); - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); if (ndev) { struct smc911x_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h index d51261ba464..9965da39281 100644 --- a/drivers/net/ethernet/smsc/smc911x.h +++ b/drivers/net/ethernet/smsc/smc911x.h @@ -227,7 +227,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, #define SMC_DMA_ACK_IRQ(dev, dma) \ { \ if (DCSR(dma) & DCSR_BUSERR) { \ - printk("%s: DMA %d bus error!\n", dev->name, dma); \ + netdev_err(dev, "DMA %d bus error!\n", dma); \ } \ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \ } diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index e85c2e7e824..67d9fdeedd8 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -55,7 +55,7 @@ 
----------------------------------------------------------------------------*/ static const char version[] = - "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n"; + "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)"; #include <linux/module.h> #include <linux/kernel.h> @@ -95,14 +95,6 @@ static const char version[] = #define USE_32_BIT 1 #endif -#if defined(__H8300H__) || defined(__H8300S__) -#define NO_AUTOPROBE -#undef insl -#undef outsl -#define insl(a,b,l) io_insl_noswap(a,b,l) -#define outsl(a,b,l) io_outsl_noswap(a,b,l) -#endif - /* .the SMC9194 can be at any of the following port addresses. To change, .for a slightly different card, you can add it to the array. Keep in @@ -114,12 +106,6 @@ struct devlist { unsigned int irq; }; -#if defined(CONFIG_H8S_EDOSK2674) -static struct devlist smc_devlist[] __initdata = { - {.port = 0xf80000, .irq = 16}, - {.port = 0, .irq = 0 }, -}; -#else static struct devlist smc_devlist[] __initdata = { {.port = 0x200, .irq = 0}, {.port = 0x220, .irq = 0}, @@ -139,7 +125,6 @@ static struct devlist smc_devlist[] __initdata = { {.port = 0x3E0, .irq = 0}, {.port = 0, .irq = 0}, }; -#endif /* . Wait time for memory to be free. This probably shouldn't be . tuned that much, as waiting for this means nothing else happens @@ -612,7 +597,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) packet_no = inb( ioaddr + PNR_ARR + 1 ); if ( packet_no & 0x80 ) { /* or isn't there? BAD CHIP! */ - printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n"); + netdev_dbg(dev, CARDNAME": Memory allocation failed.\n"); dev_kfree_skb_any(skb); lp->saved_skb = NULL; netif_wake_queue(dev); @@ -625,7 +610,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) /* point to the beginning of the packet */ outw( PTR_AUTOINC , ioaddr + POINTER ); - PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length )); + PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length)); #if SMC_DEBUG > 2 print_packet( buf, length ); #endif @@ -651,11 +636,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) #ifdef USE_32_BIT if ( length & 0x2 ) { outsl(ioaddr + DATA_1, buf, length >> 2 ); -#if !defined(__H8300H__) && !defined(__H8300S__) outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); -#else - ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); -#endif } else outsl(ioaddr + DATA_1, buf, length >> 2 ); @@ -865,7 +846,6 @@ static const struct net_device_ops smc_netdev_ops = { static int __init smc_probe(struct net_device *dev, int ioaddr) { int i, memory, retval; - static unsigned version_printed; unsigned int bank; const char *version_string; @@ -899,7 +879,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) retval = -ENODEV; goto err_out; } -#if !defined(CONFIG_H8S_EDOSK2674) /* well, we've already written once, so hopefully another time won't hurt. This time, I need to switch the bank register to bank 1, so I can access the base address register */ @@ -914,10 +893,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) retval = -ENODEV; goto err_out; } -#else - (void)base_address_register; /* Warning suppression */ -#endif - /* check if the revision register is something that I recognize. These might need to be added to later, as future revisions @@ -937,8 +912,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) It might be prudent to check a listing of MAC addresses against the hardware address, or do some other tests. 
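Editor's note: the smc911x_probe() banner just above shows the continuation idiom these patches rely on: the opening netdev_info() deliberately omits the trailing newline, optional fields are appended with pr_cont(), and a final pr_cont("\n") closes the record. A compact sketch; demo_banner and its DMA parameters are hypothetical.

        #include <linux/netdevice.h>

        static void demo_banner(struct net_device *dev, const char *chip,
                                int rxdma, int txdma)
        {
                /* start the line without '\n' ... */
                netdev_info(dev, "%s at %#lx IRQ %d",
                            chip, dev->base_addr, dev->irq);
                /* ... append optional fields to the same record ... */
                if (rxdma != -1)
                        pr_cont(" RXDMA %d", rxdma);
                if (txdma != -1)
                        pr_cont(" TXDMA %d", txdma);
                /* ... then terminate it */
                pr_cont("\n");
        }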
*/ - if (version_printed++ == 0) - printk("%s", version); + pr_info_once("%s\n", version); /* fill in some of the fields */ dev->base_addr = ioaddr; @@ -1027,21 +1001,21 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) /* now, print out the card info, in a short format.. */ - printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name, - version_string, revision_register & 0xF, ioaddr, dev->irq, - if_string, memory ); + netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", + version_string, revision_register & 0xF, ioaddr, dev->irq, + if_string, memory); /* . Print the Ethernet address */ - printk("ADDR: %pM\n", dev->dev_addr); + netdev_info(dev, "ADDR: %pM\n", dev->dev_addr); /* Grab the IRQ */ - retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); - if (retval) { - printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME, - dev->irq, retval); - goto err_out; - } + retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); + if (retval) { + netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n", + DRV_NAME, dev->irq, retval); + goto err_out; + } dev->netdev_ops = &smc_netdev_ops; dev->watchdog_timeo = HZ/20; @@ -1061,30 +1035,32 @@ static void print_packet( byte * buf, int length ) int remainder; int lines; - printk("Packet of length %d\n", length); + pr_debug("Packet of length %d\n", length); lines = length / 16; remainder = length % 16; for ( i = 0; i < lines ; i ++ ) { int cur; + printk(KERN_DEBUG); for ( cur = 0; cur < 8; cur ++ ) { byte a, b; a = *(buf ++ ); b = *(buf ++ ); - printk("%02x%02x ", a, b ); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); } + printk(KERN_DEBUG); for ( i = 0; i < remainder/2 ; i++ ) { byte a, b; a = *(buf ++ ); b = *(buf ++ ); - printk("%02x%02x ", a, b ); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); #endif } #endif @@ -1151,9 +1127,8 @@ static void smc_timeout(struct net_device *dev) { /* If we get here, some higher level has decided we are broken. There should really be a "kick me" function call instead. */ - printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n", - tx_done(dev) ? 
"IRQ conflict" : "network cable problem"); /* "kick" the adaptor */ smc_reset( dev->base_addr ); smc_enable( dev->base_addr ); @@ -1323,8 +1298,7 @@ static void smc_tx( struct net_device * dev ) dev->stats.tx_errors++; if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; if ( tx_status & TS_LATCOL ) { - printk(KERN_DEBUG CARDNAME - ": Late collision occurred on last xmit.\n"); + netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n"); dev->stats.tx_window_errors++; } #if 0 @@ -1332,7 +1306,7 @@ static void smc_tx( struct net_device * dev ) #endif if ( tx_status & TS_SUCCESS ) { - printk(CARDNAME": Successful packet caused interrupt\n"); + netdev_info(dev, CARDNAME": Successful packet caused interrupt\n"); } /* re-enable transmit */ SMC_SELECT_BANK( 0 ); @@ -1571,9 +1545,7 @@ int __init init_module(void) /* copy the parameters from insmod into the device structure */ devSMC9194 = smc_init(-1); - if (IS_ERR(devSMC9194)) - return PTR_ERR(devSMC9194); - return 0; + return PTR_ERR_OR_ZERO(devSMC9194); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 656d2e2ebfc..8ef70d9c20c 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -740,7 +740,7 @@ static int smc91c92_resume(struct pcmcia_device *link) (smc->cardid == PRODID_PSION_NET100))) { i = osi_load_firmware(link); if (i) { - pr_err("smc91c92_cs: Failed to load firmware\n"); + netdev_err(dev, "Failed to load firmware\n"); return i; } } @@ -793,7 +793,7 @@ static int check_sig(struct pcmcia_device *link) } if (width) { - pr_info("using 8-bit IO window\n"); + netdev_info(dev, "using 8-bit IO window\n"); smc91c92_suspend(link); pcmcia_fixup_iowidth(link); @@ -1036,7 +1036,7 @@ static void smc_dump(struct net_device *dev) save = inw(ioaddr + BANK_SELECT); for (w = 0; w < 4; w++) { SMC_SELECT_BANK(w); - netdev_printk(KERN_DEBUG, dev, "bank %d: ", w); + netdev_dbg(dev, "bank %d: ", w); for (i = 0; i < 14; i += 2) pr_cont(" %04x", inw(ioaddr + i)); pr_cont("\n"); @@ -1213,8 +1213,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb, if (smc->saved_skb) { /* THIS SHOULD NEVER HAPPEN. */ dev->stats.tx_aborted_errors++; - netdev_printk(KERN_DEBUG, dev, - "Internal error -- sent packet while busy\n"); + netdev_dbg(dev, "Internal error -- sent packet while busy\n"); return NETDEV_TX_BUSY; } smc->saved_skb = skb; @@ -1254,7 +1253,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb, } /* Otherwise defer until the Tx-space-allocated interrupt. */ - pr_debug("%s: memory allocation deferred.\n", dev->name); + netdev_dbg(dev, "memory allocation deferred.\n"); outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); spin_unlock_irqrestore(&smc->lock, flags); @@ -1317,8 +1316,8 @@ static void smc_eph_irq(struct net_device *dev) SMC_SELECT_BANK(0); ephs = inw(ioaddr + EPH); - pr_debug("%s: Ethernet protocol handler interrupt, status" - " %4.4x.\n", dev->name, ephs); + netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n", + ephs); /* Could be a counter roll-over warning: update stats. 
*/ card_stats = inw(ioaddr + COUNTER); /* single collisions */ @@ -1357,8 +1356,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) ioaddr = dev->base_addr; - pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name, - irq, ioaddr); + netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n", + irq, ioaddr); spin_lock(&smc->lock); smc->watchdog = 0; @@ -1366,8 +1365,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) if ((saved_bank & 0xff00) != 0x3300) { /* The device does not exist -- the card could be off-line, or maybe it has been ejected. */ - pr_debug("%s: SMC91c92 interrupt %d for non-existent" - "/ejected device.\n", dev->name, irq); + netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n", + irq); handled = 0; goto irq_done; } @@ -1380,8 +1379,8 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) do { /* read the status flag, and mask it */ status = inw(ioaddr + INTERRUPT) & 0xff; - pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, - status, mask); + netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n", + status, mask); if ((status & mask) == 0) { if (bogus_cnt == INTR_WORK) handled = 0; @@ -1425,15 +1424,15 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) smc_eph_irq(dev); } while (--bogus_cnt); - pr_debug(" Restoring saved registers mask %2.2x bank %4.4x" - " pointer %4.4x.\n", mask, saved_bank, saved_pointer); + netdev_dbg(dev, " Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n", + mask, saved_bank, saved_pointer); /* restore state register */ outw((mask<<8), ioaddr + INTERRUPT); outw(saved_pointer, ioaddr + POINTER); SMC_SELECT_BANK(saved_bank); - pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq); + netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq); irq_done: @@ -1491,10 +1490,10 @@ static void smc_rx(struct net_device *dev) rx_status = inw(ioaddr + DATA_1); packet_length = inw(ioaddr + DATA_1) & 0x07ff; - pr_debug("%s: Receive status %4.4x length %d.\n", - dev->name, rx_status, packet_length); + netdev_dbg(dev, "Receive status %4.4x length %d.\n", + rx_status, packet_length); - if (!(rx_status & RS_ERRORS)) { + if (!(rx_status & RS_ERRORS)) { /* do stuff to make a new packet */ struct sk_buff *skb; @@ -1502,7 +1501,7 @@ static void smc_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, packet_length+2); if (skb == NULL) { - pr_debug("%s: Low memory, packet dropped.\n", dev->name); + netdev_dbg(dev, "Low memory, packet dropped.\n"); dev->stats.rx_dropped++; outw(MC_RELEASE, ioaddr + MMU_CMD); return; @@ -1643,7 +1642,7 @@ static void smc_reset(struct net_device *dev) struct smc_private *smc = netdev_priv(dev); int i; - pr_debug("%s: smc91c92 reset called.\n", dev->name); + netdev_dbg(dev, "smc91c92 reset called.\n"); /* The first interaction must be a write to bring the chip out of sleep mode. 
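Editor's note: the smc_rx() low-memory path above follows the common receive shape: allocate with netdev_alloc_skb(), account rx_dropped and release the hardware buffer on failure, otherwise align, copy, set the protocol, and hand the skb to the stack. A generic sketch under those assumptions; demo_rx is hypothetical and the chip access is elided.

        #include <linux/netdevice.h>
        #include <linux/etherdevice.h>
        #include <linux/skbuff.h>

        static void demo_rx(struct net_device *dev, const u8 *data, int len)
        {
                struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

                if (!skb) {
                        dev->stats.rx_dropped++; /* drop; HW buffer must
                                                  * still be released */
                        return;
                }
                skb_reserve(skb, 2);             /* align the IP header */
                memcpy(skb_put(skb, len), data, len);
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
        }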
*/ diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 73be7f3982e..8bf29eb4a5a 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -58,7 +58,7 @@ * 22/09/04 Nicolas Pitre big update (see commit log for details) */ static const char version[] = - "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n"; + "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>"; /* Debugging level */ #ifndef SMC_DEBUG @@ -82,6 +82,7 @@ static const char version[] = #include <linux/mii.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -149,16 +150,16 @@ MODULE_ALIAS("platform:smc91x"); #define MII_DELAY 1 #if SMC_DEBUG > 0 -#define DBG(n, args...) \ +#define DBG(n, dev, args...) \ do { \ if (SMC_DEBUG >= (n)) \ - printk(args); \ + netdev_dbg(dev, args); \ } while (0) -#define PRINTK(args...) printk(args) +#define PRINTK(dev, args...) netdev_info(dev, args) #else -#define DBG(n, args...) do { } while(0) -#define PRINTK(args...) printk(KERN_DEBUG args) +#define DBG(n, dev, args...) do { } while (0) +#define PRINTK(dev, args...) netdev_dbg(dev, args) #endif #if SMC_DEBUG > 3 @@ -173,24 +174,26 @@ static void PRINT_PKT(u_char *buf, int length) for (i = 0; i < lines ; i ++) { int cur; + printk(KERN_DEBUG); for (cur = 0; cur < 8; cur++) { u_char a, b; a = *buf++; b = *buf++; - printk("%02x%02x ", a, b); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); } + printk(KERN_DEBUG); for (i = 0; i < remainder/2 ; i++) { u_char a, b; a = *buf++; b = *buf++; - printk("%02x%02x ", a, b); + pr_cont("%02x%02x ", a, b); } - printk("\n"); + pr_cont("\n"); } #else -#define PRINT_PKT(x...) do { } while(0) +#define PRINT_PKT(x...) 
do { } while (0) #endif @@ -226,8 +229,8 @@ static void PRINT_PKT(u_char *buf, int length) unsigned long timeout = jiffies + 2; \ while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \ if (time_after(jiffies, timeout)) { \ - printk("%s: timeout %s line %d\n", \ - dev->name, __FILE__, __LINE__); \ + netdev_dbg(dev, "timeout %s line %d\n", \ + __FILE__, __LINE__); \ break; \ } \ cpu_relax(); \ @@ -246,7 +249,7 @@ static void smc_reset(struct net_device *dev) unsigned int ctl, cfg; struct sk_buff *pending_skb; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); /* Disable all interrupts, block TX tasklet */ spin_lock_irq(&lp->lock); @@ -339,7 +342,7 @@ static void smc_enable(struct net_device *dev) void __iomem *ioaddr = lp->base; int mask; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); /* see the header file for options in TCR/RCR DEFAULT */ SMC_SELECT_BANK(lp, 0); @@ -373,7 +376,7 @@ static void smc_shutdown(struct net_device *dev) void __iomem *ioaddr = lp->base; struct sk_buff *pending_skb; - DBG(2, "%s: %s\n", CARDNAME, __func__); + DBG(2, dev, "%s: %s\n", CARDNAME, __func__); /* no more interrupts for me */ spin_lock_irq(&lp->lock); @@ -406,11 +409,11 @@ static inline void smc_rcv(struct net_device *dev) void __iomem *ioaddr = lp->base; unsigned int packet_number, status, packet_len; - DBG(3, "%s: %s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); packet_number = SMC_GET_RXFIFO(lp); if (unlikely(packet_number & RXFIFO_REMPTY)) { - PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name); + PRINTK(dev, "smc_rcv with nothing on FIFO.\n"); return; } @@ -420,9 +423,8 @@ static inline void smc_rcv(struct net_device *dev) /* First two words are status and packet length */ SMC_GET_PKT_HDR(lp, status, packet_len); packet_len &= 0x07ff; /* mask off top bits */ - DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", - dev->name, packet_number, status, - packet_len, packet_len); + DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", + packet_number, status, packet_len, packet_len); back: if (unlikely(packet_len < 6 || status & RS_ERRORS)) { @@ -433,8 +435,8 @@ static inline void smc_rcv(struct net_device *dev) } if (packet_len < 6) { /* bloody hardware */ - printk(KERN_ERR "%s: fubar (rxlen %u status %x\n", - dev->name, packet_len, status); + netdev_err(dev, "fubar (rxlen %u status %x\n", + packet_len, status); status |= RS_TOOSHORT; } SMC_WAIT_MMU_BUSY(lp); @@ -551,7 +553,7 @@ static void smc_hardware_send_pkt(unsigned long data) unsigned char *buf; unsigned long flags; - DBG(3, "%s: %s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); if (!smc_special_trylock(&lp->lock, flags)) { netif_stop_queue(dev); @@ -568,7 +570,7 @@ static void smc_hardware_send_pkt(unsigned long data) packet_no = SMC_GET_AR(lp); if (unlikely(packet_no & AR_FAILED)) { - printk("%s: Memory allocation failed.\n", dev->name); + netdev_err(dev, "Memory allocation failed.\n"); dev->stats.tx_errors++; dev->stats.tx_fifo_errors++; smc_special_unlock(&lp->lock, flags); @@ -581,8 +583,8 @@ static void smc_hardware_send_pkt(unsigned long data) buf = skb->data; len = skb->len; - DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n", - dev->name, packet_no, len, len, buf); + DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n", + packet_no, len, len, buf); PRINT_PKT(buf, len); /* @@ -637,7 +639,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int numPages, poll_count, status; unsigned long flags; - DBG(3, "%s: 
%s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); BUG_ON(lp->pending_tx_skb != NULL); @@ -654,7 +656,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) */ numPages = ((skb->len & ~1) + (6 - 1)) >> 8; if (unlikely(numPages > 7)) { - printk("%s: Far too big packet error.\n", dev->name); + netdev_warn(dev, "Far too big packet error.\n"); dev->stats.tx_errors++; dev->stats.tx_dropped++; dev_kfree_skb(skb); @@ -685,7 +687,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!poll_count) { /* oh well, wait until the chip finds memory later */ netif_stop_queue(dev); - DBG(2, "%s: TX memory allocation deferred.\n", dev->name); + DBG(2, dev, "TX memory allocation deferred.\n"); SMC_ENABLE_INT(lp, IM_ALLOC_INT); } else { /* @@ -709,12 +711,12 @@ static void smc_tx(struct net_device *dev) void __iomem *ioaddr = lp->base; unsigned int saved_packet, packet_no, tx_status, pkt_len; - DBG(3, "%s: %s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); /* If the TX FIFO is empty then nothing to do */ packet_no = SMC_GET_TXFIFO(lp); if (unlikely(packet_no & TXFIFO_TEMPTY)) { - PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name); + PRINTK(dev, "smc_tx with nothing on FIFO.\n"); return; } @@ -725,8 +727,8 @@ static void smc_tx(struct net_device *dev) /* read the first word (status word) from this packet */ SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ); SMC_GET_PKT_HDR(lp, tx_status, pkt_len); - DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", - dev->name, tx_status, packet_no); + DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n", + tx_status, packet_no); if (!(tx_status & ES_TX_SUC)) dev->stats.tx_errors++; @@ -735,14 +737,12 @@ static void smc_tx(struct net_device *dev) dev->stats.tx_carrier_errors++; if (tx_status & (ES_LATCOL | ES_16COL)) { - PRINTK("%s: %s occurred on last xmit\n", dev->name, + PRINTK(dev, "%s occurred on last xmit\n", (tx_status & ES_LATCOL) ? "late collision" : "too many collisions"); dev->stats.tx_window_errors++; if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { - printk(KERN_INFO "%s: unexpectedly large number of " - "bad collisions. Please check duplex " - "setting.\n", dev->name); + netdev_info(dev, "unexpectedly large number of bad collisions. 
Please check duplex setting.\n"); } } @@ -830,8 +830,8 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg) /* Return to idle state */ SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); - DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); + DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); SMC_SELECT_BANK(lp, 2); return phydata; @@ -857,8 +857,8 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg, /* Return to idle state */ SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); - DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", - __func__, phyaddr, phyreg, phydata); + DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); SMC_SELECT_BANK(lp, 2); } @@ -871,7 +871,7 @@ static void smc_phy_detect(struct net_device *dev) struct smc_local *lp = netdev_priv(dev); int phyaddr; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); lp->phy_type = 0; @@ -886,8 +886,8 @@ static void smc_phy_detect(struct net_device *dev) id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1); id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2); - DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n", - dev->name, id1, id2); + DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n", + id1, id2); /* Make sure it is a valid identifier */ if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && @@ -910,7 +910,7 @@ static int smc_phy_fixed(struct net_device *dev) int phyaddr = lp->mii.phy_id; int bmcr, cfg1; - DBG(3, "%s: %s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); /* Enter Link Disable state */ cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); @@ -1044,7 +1044,7 @@ static void smc_phy_configure(struct work_struct *work) int my_ad_caps; /* My Advertised capabilities */ int status; - DBG(3, "%s:smc_program_phy()\n", dev->name); + DBG(3, dev, "smc_program_phy()\n"); spin_lock_irq(&lp->lock); @@ -1055,7 +1055,7 @@ static void smc_phy_configure(struct work_struct *work) goto smc_phy_configure_exit; if (smc_phy_reset(dev, phyaddr)) { - printk("%s: PHY reset timed out\n", dev->name); + netdev_info(dev, "PHY reset timed out\n"); goto smc_phy_configure_exit; } @@ -1082,7 +1082,7 @@ static void smc_phy_configure(struct work_struct *work) my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR); if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { - printk(KERN_INFO "Auto negotiation NOT supported\n"); + netdev_info(dev, "Auto negotiation NOT supported\n"); smc_phy_fixed(dev); goto smc_phy_configure_exit; } @@ -1118,8 +1118,8 @@ static void smc_phy_configure(struct work_struct *work) */ status = smc_phy_read(dev, phyaddr, MII_ADVERTISE); - DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps); - DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps); + DBG(2, dev, "phy caps=%x\n", my_phy_caps); + DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps); /* Restart auto-negotiation process in order to advertise my caps */ smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); @@ -1143,7 +1143,7 @@ static void smc_phy_interrupt(struct net_device *dev) int phyaddr = lp->mii.phy_id; int phy18; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); if (lp->phy_type == 0) return; @@ -1179,8 +1179,8 @@ static void smc_10bt_check_media(struct net_device *dev, int init) netif_carrier_on(dev); } if (netif_msg_link(lp)) - printk(KERN_INFO "%s: link %s\n", dev->name, - new_carrier ? 
"up" : "down"); + netdev_info(dev, "link %s\n", + new_carrier ? "up" : "down"); } } @@ -1211,7 +1211,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) int status, mask, timeout, card_stats; int saved_pointer; - DBG(3, "%s: %s\n", dev->name, __func__); + DBG(3, dev, "%s\n", __func__); spin_lock(&lp->lock); @@ -1230,12 +1230,12 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) do { status = SMC_GET_INT(lp); - DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", - dev->name, status, mask, - ({ int meminfo; SMC_SELECT_BANK(lp, 0); - meminfo = SMC_GET_MIR(lp); - SMC_SELECT_BANK(lp, 2); meminfo; }), - SMC_GET_FIFO(lp)); + DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", + status, mask, + ({ int meminfo; SMC_SELECT_BANK(lp, 0); + meminfo = SMC_GET_MIR(lp); + SMC_SELECT_BANK(lp, 2); meminfo; }), + SMC_GET_FIFO(lp)); status &= mask; if (!status) @@ -1243,20 +1243,20 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) if (status & IM_TX_INT) { /* do this before RX as it will free memory quickly */ - DBG(3, "%s: TX int\n", dev->name); + DBG(3, dev, "TX int\n"); smc_tx(dev); SMC_ACK_INT(lp, IM_TX_INT); if (THROTTLE_TX_PKTS) netif_wake_queue(dev); } else if (status & IM_RCV_INT) { - DBG(3, "%s: RX irq\n", dev->name); + DBG(3, dev, "RX irq\n"); smc_rcv(dev); } else if (status & IM_ALLOC_INT) { - DBG(3, "%s: Allocation irq\n", dev->name); + DBG(3, dev, "Allocation irq\n"); tasklet_hi_schedule(&lp->tx_task); mask &= ~IM_ALLOC_INT; } else if (status & IM_TX_EMPTY_INT) { - DBG(3, "%s: TX empty\n", dev->name); + DBG(3, dev, "TX empty\n"); mask &= ~IM_TX_EMPTY_INT; /* update stats */ @@ -1271,10 +1271,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) /* multiple collisions */ dev->stats.collisions += card_stats & 0xF; } else if (status & IM_RX_OVRN_INT) { - DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, - ({ int eph_st; SMC_SELECT_BANK(lp, 0); - eph_st = SMC_GET_EPH_STATUS(lp); - SMC_SELECT_BANK(lp, 2); eph_st; })); + DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n", + ({ int eph_st; SMC_SELECT_BANK(lp, 0); + eph_st = SMC_GET_EPH_STATUS(lp); + SMC_SELECT_BANK(lp, 2); eph_st; })); SMC_ACK_INT(lp, IM_RX_OVRN_INT); dev->stats.rx_errors++; dev->stats.rx_fifo_errors++; @@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) smc_phy_interrupt(dev); } else if (status & IM_ERCV_INT) { SMC_ACK_INT(lp, IM_ERCV_INT); - PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name); + PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n"); } } while (--timeout); @@ -1296,11 +1296,11 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) #ifndef CONFIG_NET_POLL_CONTROLLER if (timeout == MAX_IRQ_LOOPS) - PRINTK("%s: spurious interrupt (mask = 0x%02x)\n", - dev->name, mask); + PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n", + mask); #endif - DBG(3, "%s: Interrupt done (%d loops)\n", - dev->name, MAX_IRQ_LOOPS - timeout); + DBG(3, dev, "Interrupt done (%d loops)\n", + MAX_IRQ_LOOPS - timeout); /* * We return IRQ_HANDLED unconditionally here even if there was @@ -1333,7 +1333,7 @@ static void smc_timeout(struct net_device *dev) void __iomem *ioaddr = lp->base; int status, mask, eph_st, meminfo, fifo; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); spin_lock_irq(&lp->lock); status = SMC_GET_INT(lp); @@ -1344,9 +1344,8 @@ static void smc_timeout(struct net_device *dev) meminfo = SMC_GET_MIR(lp); SMC_SELECT_BANK(lp, 2); spin_unlock_irq(&lp->lock); - PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x " - "MEM 0x%04x FIFO 0x%04x 
EPH_ST 0x%04x)\n", - dev->name, status, mask, meminfo, fifo, eph_st ); + PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n", + status, mask, meminfo, fifo, eph_st); smc_reset(dev); smc_enable(dev); @@ -1377,10 +1376,10 @@ static void smc_set_multicast_list(struct net_device *dev) unsigned char multicast_table[8]; int update_multicast = 0; - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); if (dev->flags & IFF_PROMISC) { - DBG(2, "%s: RCR_PRMS\n", dev->name); + DBG(2, dev, "RCR_PRMS\n"); lp->rcr_cur_mode |= RCR_PRMS; } @@ -1395,7 +1394,7 @@ static void smc_set_multicast_list(struct net_device *dev) * checked before the table is */ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { - DBG(2, "%s: RCR_ALMUL\n", dev->name); + DBG(2, dev, "RCR_ALMUL\n"); lp->rcr_cur_mode |= RCR_ALMUL; } @@ -1437,7 +1436,7 @@ static void smc_set_multicast_list(struct net_device *dev) /* now, the table can be loaded into the chipset */ update_multicast = 1; } else { - DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name); + DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n"); lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); /* @@ -1470,7 +1469,7 @@ smc_open(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); /* Setup the default Register Modes */ lp->tcr_cur_mode = TCR_DEFAULT; @@ -1514,7 +1513,7 @@ static int smc_close(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); - DBG(2, "%s: %s\n", dev->name, __func__); + DBG(2, dev, "%s\n", __func__); netif_stop_queue(dev); netif_carrier_off(dev); @@ -1694,7 +1693,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev, int i; int imax; - DBG(1, "Reading %d bytes at %d(0x%x)\n", + DBG(1, dev, "Reading %d bytes at %d(0x%x)\n", eeprom->len, eeprom->offset, eeprom->offset); imax = smc_ethtool_geteeprom_len(dev); for (i = 0; i < eeprom->len; i += 2) { @@ -1706,7 +1705,7 @@ static int smc_ethtool_geteeprom(struct net_device *dev, ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf); if (ret != 0) return ret; - DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); + DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); data[i] = (wbuf >> 8) & 0xff; data[i+1] = wbuf & 0xff; } @@ -1719,8 +1718,8 @@ static int smc_ethtool_seteeprom(struct net_device *dev, int i; int imax; - DBG(1, "Writing %d bytes to %d(0x%x)\n", - eeprom->len, eeprom->offset, eeprom->offset); + DBG(1, dev, "Writing %d bytes to %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); imax = smc_ethtool_geteeprom_len(dev); for (i = 0; i < eeprom->len; i += 2) { int ret; @@ -1729,7 +1728,7 @@ static int smc_ethtool_seteeprom(struct net_device *dev, if (offset > imax) break; wbuf = (data[i] << 8) | data[i + 1]; - DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); + DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); if (ret != 0) return ret; @@ -1784,7 +1783,7 @@ static int smc_findirq(struct smc_local *lp) int timeout = 20; unsigned long cookie; - DBG(2, "%s: %s\n", CARDNAME, __func__); + DBG(2, dev, "%s: %s\n", CARDNAME, __func__); cookie = probe_irq_on(); @@ -1856,21 +1855,21 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, unsigned long irq_flags) { struct smc_local *lp = netdev_priv(dev); - static int version_printed = 0; int retval; unsigned int val, revision_register; const char *version_string; - DBG(2, "%s: %s\n", CARDNAME, __func__); + DBG(2, dev, "%s: 
%s\n", CARDNAME, __func__); /* First, see if the high byte is 0x33 */ val = SMC_CURRENT_BANK(lp); - DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); + DBG(2, dev, "%s: bank signature probe returned 0x%04x\n", + CARDNAME, val); if ((val & 0xFF00) != 0x3300) { if ((val & 0xFF) == 0x33) { - printk(KERN_WARNING - "%s: Detected possible byte-swapped interface" - " at IOADDR %p\n", CARDNAME, ioaddr); + netdev_warn(dev, + "%s: Detected possible byte-swapped interface at IOADDR %p\n", + CARDNAME, ioaddr); } retval = -ENODEV; goto err_out; @@ -1897,8 +1896,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, val = SMC_GET_BASE(lp); val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { - printk("%s: IOADDR %p doesn't match configuration (%x).\n", - CARDNAME, ioaddr, val); + netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n", + CARDNAME, ioaddr, val); } /* @@ -1908,21 +1907,19 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, */ SMC_SELECT_BANK(lp, 3); revision_register = SMC_GET_REV(lp); - DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register); + DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register); version_string = chip_ids[ (revision_register >> 4) & 0xF]; if (!version_string || (revision_register & 0xff00) != 0x3300) { /* I don't recognize this chip, so... */ - printk("%s: IO %p: Unrecognized revision register 0x%04x" - ", Contact author.\n", CARDNAME, - ioaddr, revision_register); + netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n", + CARDNAME, ioaddr, revision_register); retval = -ENODEV; goto err_out; } /* At this point I'll assume that the chip is an SMC91x. */ - if (version_printed++ == 0) - printk("%s", version); + pr_info_once("%s\n", version); /* fill in some of the fields */ dev->base_addr = (unsigned long)ioaddr; @@ -1940,7 +1937,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, /* * If dev->irq is 0, then the device has to be banged on to see * what the IRQ is. - * + * * This banging doesn't always detect the IRQ, for unknown reasons. * a workaround is to reset the chip and try again. * @@ -1965,8 +1962,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, } } if (dev->irq == 0) { - printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", - dev->name); + netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n"); retval = -ENODEV; goto err_out; } @@ -2030,32 +2026,31 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, retval = register_netdev(dev); if (retval == 0) { /* now, print out the card info, in a short format.. */ - printk("%s: %s (rev %d) at %p IRQ %d", - dev->name, version_string, revision_register & 0x0f, - lp->base, dev->irq); + netdev_info(dev, "%s (rev %d) at %p IRQ %d", + version_string, revision_register & 0x0f, + lp->base, dev->irq); if (dev->dma != (unsigned char)-1) - printk(" DMA %d", dev->dma); + pr_cont(" DMA %d", dev->dma); - printk("%s%s\n", + pr_cont("%s%s\n", lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); if (!is_valid_ether_addr(dev->dev_addr)) { - printk("%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", dev->name); + netdev_warn(dev, "Invalid ethernet MAC address. 
Please set using ifconfig\n"); } else { /* Print the Ethernet address */ - printk("%s: Ethernet addr: %pM\n", - dev->name, dev->dev_addr); + netdev_info(dev, "Ethernet addr: %pM\n", + dev->dev_addr); } if (lp->phy_type == 0) { - PRINTK("%s: No PHY found\n", dev->name); + PRINTK(dev, "No PHY found\n"); } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { - PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name); + PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n"); } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { - PRINTK("%s: PHY LAN83C180\n", dev->name); + PRINTK(dev, "PHY LAN83C180\n"); } } @@ -2165,7 +2160,8 @@ static inline void smc_request_datacs(struct platform_device *pdev, struct net_d return; if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { - printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); + netdev_info(ndev, "%s: failed to request datacs memory region.\n", + CARDNAME); return; } @@ -2189,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * } } +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +}; +MODULE_DEVICE_TABLE(of, smc91x_match); +#endif + /* * smc_init(void) * Input parameters: @@ -2203,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * static int smc_drv_probe(struct platform_device *pdev) { struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); + const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; struct resource *res, *ires; @@ -2222,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev) */ lp = netdev_priv(ndev); + lp->cfg.flags = 0; if (pd) { memcpy(&lp->cfg, pd, sizeof(lp->cfg)); lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); - } else { + } + +#if IS_BUILTIN(CONFIG_OF) + match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); + if (match) { + struct device_node *np = pdev->dev.of_node; + u32 val; + + /* Combination of IO widths supported, default to 16-bit */ + if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (val & 1) + lp->cfg.flags |= SMC91X_USE_8BIT; + if ((val == 0) || (val & 2)) + lp->cfg.flags |= SMC91X_USE_16BIT; + if (val & 4) + lp->cfg.flags |= SMC91X_USE_32BIT; + } else { + lp->cfg.flags |= SMC91X_USE_16BIT; + } + } +#endif + + if (!pd && !match) { lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? 
SMC91X_USE_32BIT : 0; @@ -2307,7 +2336,7 @@ static int smc_drv_probe(struct platform_device *pdev) out_free_netdev: free_netdev(ndev); out: - printk("%s: not found (%d).\n", CARDNAME, ret); + pr_info("%s: not found (%d).\n", CARDNAME, ret); return ret; } @@ -2375,15 +2404,6 @@ static int smc_drv_resume(struct device *dev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id smc91x_match[] = { - { .compatible = "smsc,lan91c94", }, - { .compatible = "smsc,lan91c111", }, - {}, -}; -MODULE_DEVICE_TABLE(of, smc91x_match); -#endif - static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = smc_drv_resume, diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 5730fe2445a..749654b976b 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -46,7 +46,8 @@ defined(CONFIG_MACH_LITTLETON) ||\ defined(CONFIG_MACH_ZYLONITE2) ||\ defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) + defined(CONFIG_MACH_STARGATE2) ||\ + defined(CONFIG_ARCH_VERSATILE) #include <asm/mach-types.h> @@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) #define SMC_IRQ_FLAGS (-1) /* from resource */ /* We actually can't write halfwords properly if not word aligned */ @@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define RPC_LSA_DEFAULT RPC_LED_TX_RX #define RPC_LSB_DEFAULT RPC_LED_100_10 -#elif defined(CONFIG_ARCH_VERSATILE) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - #elif defined(CONFIG_MN10300) /* @@ -907,8 +893,8 @@ static const char * chip_ids[ 16 ] = { ({ \ int __b = SMC_CURRENT_BANK(lp); \ if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \ - printk( "%s: bank reg screwed (0x%04x)\n", \ - CARDNAME, __b ); \ + pr_err("%s: bank reg screwed (0x%04x)\n", \ + CARDNAME, __b); \ BUG(); \ } \ reg<<SMC_IO_SHIFT; \ @@ -1124,8 +1110,7 @@ static const char * chip_ids[ 16 ] = { void __iomem *__ioaddr = ioaddr; \ if (__len >= 2 && (unsigned long)__ptr & 2) { \ __len -= 2; \ - SMC_outw(*(u16 *)__ptr, ioaddr, \ - DATA_REG(lp)); \ + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ __ptr += 2; \ } \ if (SMC_CAN_USE_DATACS && lp->datacs) \ @@ -1133,8 +1118,7 @@ static const char * chip_ids[ 16 ] = { SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ if (__len & 2) { \ __ptr += (__len & ~3); \ - SMC_outw(*((u16 *)__ptr), ioaddr, \ - DATA_REG(lp)); \ + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ } \ } else if (SMC_16BIT(lp)) \ SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 5fdbc2686eb..8564f23a679 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2167,7 +2167,7 @@ static int 
smsc911x_init(struct net_device *dev) udelay(1000); if (to == 0) { - pr_err("Device not READY in 100ms aborting\n"); + netdev_err(dev, "Device not READY in 100ms aborting\n"); return -ENODEV; } @@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) SMSC_TRACE(pdata, probe, "MAC Address is specified by configuration"); } else if (is_valid_ether_addr(pdata->config.mac)) { - memcpy(dev->dev_addr, pdata->config.mac, 6); + memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN); SMSC_TRACE(pdata, probe, "MAC Address specified by platform data"); } else { diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 5f9e79f7f2d..f433d97aa09 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -19,6 +19,8 @@ *************************************************************************** */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/netdevice.h> @@ -33,7 +35,6 @@ #include "smsc9420.h" #define DRV_NAME "smsc9420" -#define PFX DRV_NAME ": " #define DRV_MDIONAME "smsc9420-mdio" #define DRV_DESCRIPTION "SMSC LAN9420 driver" #define DRV_VERSION "1.01" @@ -97,21 +98,6 @@ static uint debug = -1; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "debug level"); -#define smsc_dbg(TYPE, f, a...) \ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_DEBUG PFX f "\n", ## a); \ -} while (0) - -#define smsc_info(TYPE, f, a...) \ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_INFO PFX f "\n", ## a); \ -} while (0) - -#define smsc_warn(TYPE, f, a...) \ -do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \ - printk(KERN_WARNING PFX f "\n", ## a); \ -} while (0) - static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) { return ioread32(pd->ioaddr + offset); @@ -140,7 +126,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) /* confirm MII not busy */ if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { - smsc_warn(DRV, "MII is busy???"); + netif_warn(pd, drv, pd->dev, "MII is busy???\n"); goto out; } @@ -159,7 +145,7 @@ static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) udelay(10); } - smsc_warn(DRV, "MII busy timeout!"); + netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); out: spin_unlock_irqrestore(&pd->phy_lock, flags); @@ -178,7 +164,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, /* confirm MII not busy */ if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { - smsc_warn(DRV, "MII is busy???"); + netif_warn(pd, drv, pd->dev, "MII is busy???\n"); goto out; } @@ -200,7 +186,7 @@ static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, udelay(10); } - smsc_warn(DRV, "MII busy timeout!"); + netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); out: spin_unlock_irqrestore(&pd->phy_lock, flags); @@ -222,7 +208,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) BUG_ON(!pd); if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { - smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy"); + netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__); return -EIO; } @@ -235,7 +221,7 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) return 0; } while (timeout--); - smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out"); + netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__); return -EIO; } @@ -347,9 +333,9 @@ static int 
smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) int timeout = 100; u32 e2cmd; - smsc_dbg(HW, "op 0x%08x", op); + netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op); if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { - smsc_warn(HW, "Busy at start"); + netif_warn(pd, hw, pd->dev, "Busy at start\n"); return -EBUSY; } @@ -362,12 +348,13 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); if (!timeout) { - smsc_info(HW, "TIMED OUT"); + netif_info(pd, hw, pd->dev, "TIMED OUT\n"); return -EAGAIN; } if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { - smsc_info(HW, "Error occurred during eeprom operation"); + netif_info(pd, hw, pd->dev, + "Error occurred during eeprom operation\n"); return -EINVAL; } @@ -380,7 +367,7 @@ static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, u32 op = E2P_CMD_EPC_CMD_READ_ | address; int ret; - smsc_dbg(HW, "address 0x%x", address); + netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address); ret = smsc9420_eeprom_send_cmd(pd, op); if (!ret) @@ -395,7 +382,7 @@ static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; int ret; - smsc_dbg(HW, "address 0x%x, data 0x%x", address, data); + netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data); ret = smsc9420_eeprom_send_cmd(pd, op); if (!ret) { @@ -492,7 +479,8 @@ static void smsc9420_check_mac_address(struct net_device *dev) /* Check if mac address has been specified when bringing interface up */ if (is_valid_ether_addr(dev->dev_addr)) { smsc9420_set_mac_address(dev); - smsc_dbg(PROBE, "MAC Address is specified by configuration"); + netif_dbg(pd, probe, pd->dev, + "MAC Address is specified by configuration\n"); } else { /* Try reading mac address from device. if EEPROM is present * it will already have been set */ @@ -507,12 +495,14 @@ static void smsc9420_check_mac_address(struct net_device *dev) if (is_valid_ether_addr(dev->dev_addr)) { /* eeprom values are valid so use them */ - smsc_dbg(PROBE, "Mac Address is read from EEPROM"); + netif_dbg(pd, probe, pd->dev, + "Mac Address is read from EEPROM\n"); } else { /* eeprom values are invalid, generate random MAC */ eth_hw_addr_random(dev); smsc9420_set_mac_address(dev); - smsc_dbg(PROBE, "MAC Address is set to random"); + netif_dbg(pd, probe, pd->dev, + "MAC Address is set to random\n"); } } } @@ -535,7 +525,7 @@ static void smsc9420_stop_tx(struct smsc9420_pdata *pd) } if (!timeout) - smsc_warn(IFDOWN, "TX DMAC failed to stop"); + netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n"); /* ACK Tx DMAC stop bit */ smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_); @@ -646,7 +636,8 @@ static void smsc9420_stop_rx(struct smsc9420_pdata *pd) } if (!timeout) - smsc_warn(IFDOWN, "RX DMAC did not stop! timeout."); + netif_warn(pd, ifdown, pd->dev, + "RX DMAC did not stop! 
timeout\n"); /* ACK the Rx DMAC stop bit */ smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); @@ -736,7 +727,7 @@ static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) smsc9420_reg_read(pd, BUS_MODE); udelay(2); if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) - smsc_warn(DRV, "Software reset not cleared"); + netif_warn(pd, drv, pd->dev, "Software reset not cleared\n"); } static int smsc9420_stop(struct net_device *dev) @@ -855,7 +846,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) PKT_BUF_SZ, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(pd->pdev, mapping)) { dev_kfree_skb_any(skb); - smsc_warn(RX_ERR, "pci_map_single failed!"); + netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); return -ENOMEM; } @@ -1004,7 +995,8 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, mapping = pci_map_single(pd->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(pd->pdev, mapping)) { - smsc_warn(TX_ERR, "pci_map_single failed, dropping packet"); + netif_warn(pd, tx_err, pd->dev, + "pci_map_single failed, dropping packet\n"); return NETDEV_TX_BUSY; } @@ -1056,12 +1048,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev) u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); if (dev->flags & IFF_PROMISC) { - smsc_dbg(HW, "Promiscuous Mode Enabled"); + netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n"); mac_cr |= MAC_CR_PRMS_; mac_cr &= (~MAC_CR_MCPAS_); mac_cr &= (~MAC_CR_HPFILT_); } else if (dev->flags & IFF_ALLMULTI) { - smsc_dbg(HW, "Receive all Multicast Enabled"); + netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n"); mac_cr &= (~MAC_CR_PRMS_); mac_cr |= MAC_CR_MCPAS_; mac_cr &= (~MAC_CR_HPFILT_); @@ -1069,7 +1061,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev) struct netdev_hw_addr *ha; u32 hash_lo = 0, hash_hi = 0; - smsc_dbg(HW, "Multicast filter enabled"); + netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n"); netdev_for_each_mc_addr(ha, dev) { u32 bit_num = smsc9420_hash(ha->addr); u32 mask = 1 << (bit_num & 0x1F); @@ -1087,7 +1079,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev) mac_cr &= (~MAC_CR_MCPAS_); mac_cr |= MAC_CR_HPFILT_; } else { - smsc_dbg(HW, "Receive own packets only."); + netif_dbg(pd, hw, pd->dev, "Receive own packets only\n"); smsc9420_reg_write(pd, HASHH, 0); smsc9420_reg_write(pd, HASHL, 0); @@ -1115,11 +1107,11 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) else flow = 0; - smsc_info(LINK, "rx pause %s, tx pause %s", - (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), - (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n", + cap & FLOW_CTRL_RX ? "enabled" : "disabled", + cap & FLOW_CTRL_TX ? 
"enabled" : "disabled"); } else { - smsc_info(LINK, "half duplex"); + netif_info(pd, link, pd->dev, "half duplex\n"); flow = 0; } @@ -1137,10 +1129,10 @@ static void smsc9420_phy_adjust_link(struct net_device *dev) if (phy_dev->duplex != pd->last_duplex) { u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); if (phy_dev->duplex) { - smsc_dbg(LINK, "full duplex mode"); + netif_dbg(pd, link, pd->dev, "full duplex mode\n"); mac_cr |= MAC_CR_FDPX_; } else { - smsc_dbg(LINK, "half duplex mode"); + netif_dbg(pd, link, pd->dev, "half duplex mode\n"); mac_cr &= ~MAC_CR_FDPX_; } smsc9420_reg_write(pd, MAC_CR, mac_cr); @@ -1152,9 +1144,9 @@ static void smsc9420_phy_adjust_link(struct net_device *dev) carrier = netif_carrier_ok(dev); if (carrier != pd->last_carrier) { if (carrier) - smsc_dbg(LINK, "carrier OK"); + netif_dbg(pd, link, pd->dev, "carrier OK\n"); else - smsc_dbg(LINK, "no carrier"); + netif_dbg(pd, link, pd->dev, "no carrier\n"); pd->last_carrier = carrier; } } @@ -1168,24 +1160,24 @@ static int smsc9420_mii_probe(struct net_device *dev) /* Device only supports internal PHY at address 1 */ if (!pd->mii_bus->phy_map[1]) { - pr_err("%s: no PHY found at address 1\n", dev->name); + netdev_err(dev, "no PHY found at address 1\n"); return -ENODEV; } phydev = pd->mii_bus->phy_map[1]; - smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr, - phydev->phy_id); + netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", + phydev->addr, phydev->phy_id); phydev = phy_connect(dev, dev_name(&phydev->dev), smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { - pr_err("%s: Could not attach to PHY\n", dev->name); + netdev_err(dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } - pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); /* mask with MAC supported features */ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | @@ -1223,12 +1215,12 @@ static int smsc9420_mii_init(struct net_device *dev) pd->mii_bus->phy_mask = ~(1 << 1); if (mdiobus_register(pd->mii_bus)) { - smsc_warn(PROBE, "Error registering mii bus"); + netif_warn(pd, probe, pd->dev, "Error registering mii bus\n"); goto err_out_free_bus_2; } if (smsc9420_mii_probe(dev) < 0) { - smsc_warn(PROBE, "Error probing mii bus"); + netif_warn(pd, probe, pd->dev, "Error probing mii bus\n"); goto err_out_unregister_bus_3; } @@ -1281,12 +1273,11 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) BUG_ON(!pd->rx_ring); - pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) * - RX_RING_SIZE), GFP_KERNEL); - if (pd->rx_buffers == NULL) { - smsc_warn(IFUP, "Failed to allocated rx_buffers"); + pd->rx_buffers = kmalloc_array(RX_RING_SIZE, + sizeof(struct smsc9420_ring_info), + GFP_KERNEL); + if (pd->rx_buffers == NULL) goto out; - } /* initialize the rx ring */ for (i = 0; i < RX_RING_SIZE; i++) { @@ -1301,7 +1292,8 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) /* now allocate the entire ring of skbs */ for (i = 0; i < RX_RING_SIZE; i++) { if (smsc9420_alloc_rx_buffer(pd, i)) { - smsc_warn(IFUP, "failed to allocate rx skb %d", i); + netif_warn(pd, ifup, pd->dev, + "failed to allocate rx skb %d\n", i); goto out_free_rx_skbs; } } @@ -1310,13 +1302,14 @@ static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) pd->rx_ring_tail = 0; smsc9420_reg_write(pd, VLAN1, 
ETH_P_8021Q); - smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1)); + netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n", + smsc9420_reg_read(pd, VLAN1)); if (pd->rx_csum) { /* Enable RX COE */ u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN; smsc9420_reg_write(pd, COE_CR, coe); - smsc_dbg(IFUP, "COE_CR = 0x%08x", coe); + netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe); } smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr); @@ -1339,7 +1332,8 @@ static int smsc9420_open(struct net_device *dev) int result = 0, timeout; if (!is_valid_ether_addr(dev->dev_addr)) { - smsc_warn(IFUP, "dev_addr is not a valid MAC address"); + netif_warn(pd, ifup, pd->dev, + "dev_addr is not a valid MAC address\n"); result = -EADDRNOTAVAIL; goto out_0; } @@ -1358,7 +1352,7 @@ static int smsc9420_open(struct net_device *dev) result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd); if (result) { - smsc_warn(IFUP, "Unable to use IRQ = %d", irq); + netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq); result = -ENODEV; goto out_0; } @@ -1393,7 +1387,7 @@ static int smsc9420_open(struct net_device *dev) smsc9420_pci_flush_write(pd); /* test the IRQ connection to the ISR */ - smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq); + netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq); pd->software_irq_signal = false; spin_lock_irqsave(&pd->int_lock, flags); @@ -1423,30 +1417,32 @@ static int smsc9420_open(struct net_device *dev) spin_unlock_irqrestore(&pd->int_lock, flags); if (!pd->software_irq_signal) { - smsc_warn(IFUP, "ISR failed signaling test"); + netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n"); result = -ENODEV; goto out_free_irq_1; } - smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq); + netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq); result = smsc9420_alloc_tx_ring(pd); if (result) { - smsc_warn(IFUP, "Failed to Initialize tx dma ring"); + netif_warn(pd, ifup, pd->dev, + "Failed to Initialize tx dma ring\n"); result = -ENOMEM; goto out_free_irq_1; } result = smsc9420_alloc_rx_ring(pd); if (result) { - smsc_warn(IFUP, "Failed to Initialize rx dma ring"); + netif_warn(pd, ifup, pd->dev, + "Failed to Initialize rx dma ring\n"); result = -ENOMEM; goto out_free_tx_ring_2; } result = smsc9420_mii_init(dev); if (result) { - smsc_warn(IFUP, "Failed to initialize Phy"); + netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n"); result = -ENODEV; goto out_free_rx_ring_3; } @@ -1547,7 +1543,8 @@ static int smsc9420_resume(struct pci_dev *pdev) err = pci_enable_wake(pdev, 0, 0); if (err) - smsc_warn(IFUP, "pci_enable_wake failed: %d", err); + netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n", + err); if (netif_running(dev)) { /* FIXME: gross. 
It looks like ancient PM relic.*/ @@ -1582,12 +1579,12 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) int result = 0; u32 id_rev; - printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n"); + pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION); /* First do the PCI initialisation */ result = pci_enable_device(pdev); if (unlikely(result)) { - printk(KERN_ERR "Cannot enable smsc9420\n"); + pr_err("Cannot enable smsc9420\n"); goto out_0; } @@ -1600,24 +1597,24 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) SET_NETDEV_DEV(dev, &pdev->dev); if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) { - printk(KERN_ERR "Cannot find PCI device base address\n"); + netdev_err(dev, "Cannot find PCI device base address\n"); goto out_free_netdev_2; } if ((pci_request_regions(pdev, DRV_NAME))) { - printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n"); + netdev_err(dev, "Cannot obtain PCI resources, aborting\n"); goto out_free_netdev_2; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { - printk(KERN_ERR "No usable DMA configuration, aborting.\n"); + netdev_err(dev, "No usable DMA configuration, aborting\n"); goto out_free_regions_3; } virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR), pci_resource_len(pdev, SMSC_BAR)); if (!virt_addr) { - printk(KERN_ERR "Cannot map device registers, aborting.\n"); + netdev_err(dev, "Cannot map device registers, aborting\n"); goto out_free_regions_3; } @@ -1646,16 +1643,17 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) pd->msg_enable = smsc_debug; pd->rx_csum = true; - smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr); + netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr); id_rev = smsc9420_reg_read(pd, ID_REV); switch (id_rev & 0xFFFF0000) { case 0x94200000: - smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev); + netif_info(pd, probe, pd->dev, + "LAN9420 identified, ID_REV=0x%08X\n", id_rev); break; default: - smsc_warn(PROBE, "LAN9420 NOT identified"); - smsc_warn(PROBE, "ID_REV=0x%08X", id_rev); + netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n"); + netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev); goto out_free_dmadesc_5; } @@ -1670,7 +1668,8 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) result = register_netdev(dev); if (result) { - smsc_warn(PROBE, "error %i registering device", result); + netif_warn(pd, probe, pd->dev, "error %i registering device\n", + result); goto out_free_dmadesc_5; } @@ -1707,8 +1706,6 @@ static void smsc9420_remove(struct pci_dev *pdev) if (!dev) return; - pci_set_drvdata(pdev, NULL); - pd = netdev_priv(dev); unregister_netdev(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7eb8babed2c..fc94f202a43 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -451,14 +451,14 @@ struct mac_device_info { struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr); struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); -extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], - unsigned int high, unsigned int low); -extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, - unsigned int high, unsigned int low); +void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); 
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); +void stmmac_set_mac(void __iomem *ioaddr, bool enable); -extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; extern const struct stmmac_chain_mode_ops chain_mode_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 8e5662ce488..def266da55d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -104,14 +104,13 @@ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ -extern void dwmac_enable_dma_transmission(void __iomem *ioaddr); -extern void dwmac_enable_dma_irq(void __iomem *ioaddr); -extern void dwmac_disable_dma_irq(void __iomem *ioaddr); -extern void dwmac_dma_start_tx(void __iomem *ioaddr); -extern void dwmac_dma_stop_tx(void __iomem *ioaddr); -extern void dwmac_dma_start_rx(void __iomem *ioaddr); -extern void dwmac_dma_stop_rx(void __iomem *ioaddr); -extern int dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x); +void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_irq(void __iomem *ioaddr); +void dwmac_disable_dma_irq(void __iomem *ioaddr); +void dwmac_dma_start_tx(void __iomem *ioaddr); +void dwmac_dma_stop_tx(void __iomem *ioaddr); +void dwmac_dma_start_rx(void __iomem *ioaddr); +void dwmac_dma_stop_rx(void __iomem *ioaddr); +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 48ec001566b..8607488cbcf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -128,8 +128,8 @@ struct stmmac_counters { unsigned int mmc_rx_icmp_err_octets; }; -extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); -extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); -extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); +void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); +void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); +void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); #endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f16a9bdf45b..22f89ffdfd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -110,14 +110,14 @@ struct stmmac_priv { extern int phyaddr; -extern int stmmac_mdio_unregister(struct net_device *ndev); -extern int stmmac_mdio_register(struct net_device *ndev); -extern void stmmac_set_ethtool_ops(struct net_device *netdev); +int stmmac_mdio_unregister(struct net_device *ndev); +int stmmac_mdio_register(struct net_device *ndev); +void stmmac_set_ethtool_ops(struct net_device *netdev); extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; extern const struct stmmac_hwtimestamp stmmac_ptp; -extern int stmmac_ptp_register(struct stmmac_priv *priv); -extern void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_ptp_register(struct stmmac_priv *priv); +void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_freeze(struct net_device *ndev); int stmmac_restore(struct net_device *ndev); int 
stmmac_resume(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d4ccd35a01..8a7a23a84ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (config.flags) return -EINVAL; - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - priv->hwts_tx_en = 1; - break; - default: + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } if (priv->adv_ts) { switch (config.rx_filter) { @@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } } priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 023b7c29cb2..644d80ece06 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev) stmmac_dvr_remove(ndev); - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, priv->ioaddr); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 759441b29e5..b4d50d74ba1 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -3354,7 +3354,7 @@ use_random_mac_addr: #if defined(CONFIG_SPARC) addr = of_get_property(cp->of_node, "local-mac-address", NULL); if (addr != NULL) { - memcpy(dev_addr, addr, 6); + memcpy(dev_addr, addr, ETH_ALEN); goto done; } #endif @@ -5168,7 +5168,6 @@ err_out_free_netdev: err_out_disable_pdev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return -ENODEV; } @@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f28460ce24a..388540fcb97 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9875,7 +9875,6 @@ err_out_free_res: err_out_disable_pdev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return err; } @@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index e62df2b8130..b5655b79bd3 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp) return -1; #endif } - memcpy(dev->dev_addr, addr, 6); + memcpy(dev->dev_addr, addr, ETH_ALEN); #else get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); #endif @@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev) iounmap(gp->regs); pci_release_regions(pdev); free_netdev(dev); - - pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index e37b587b386..0dbf46f08ed 100644 --- 
a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) addr = of_get_property(dp, "local-mac-address", &len); - if (qfe_slot != -1 && addr && len == 6) - memcpy(dev->dev_addr, addr, 6); + if (qfe_slot != -1 && addr && len == ETH_ALEN) + memcpy(dev->dev_addr, addr, ETH_ALEN); else - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); } hp = netdev_priv(dev); @@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, (addr = of_get_property(dp, "local-mac-address", &len)) != NULL && len == 6) { - memcpy(dev->dev_addr, addr, 6); + memcpy(dev->dev_addr, addr, ETH_ALEN); } else { - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); } #else get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); @@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev) pci_release_regions(hp->happy_dev); free_netdev(net_dev); - - pci_set_drvdata(pdev, NULL); } static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index b072f4dba03..5695ae2411d 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op) if (!dev) return -ENOMEM; - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); qe = netdev_priv(dev); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 571452e786d..4f1d2549130 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM - /*| NETIF_F_FRAGLIST */ ; ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; @@ -2447,7 +2446,6 @@ static void bdx_remove(struct pci_dev *pdev) iounmap(nic->regs); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); vfree(nic); RET(); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index de71b1ec462..53150c25a96 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA To compile this driver as a module, choose M here: the module will be called davinci_cpdma. This is recommended. +config TI_CPSW_PHY_SEL + boolean "TI CPSW Switch Phy sel Support" + depends on TI_CPSW + ---help--- + This driver supports configuring of the phy mode connected to + the CPSW. + config TI_CPSW tristate "TI CPSW Switch Support" depends on ARM && (ARCH_DAVINCI || SOC_AM33XX) select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO + select TI_CPSW_PHY_SEL ---help--- This driver supports TI's CPSW Ethernet Switch. 
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index c65148e8aa1..9cfaab8152b 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o +obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c new file mode 100644 index 00000000000..148da9ae836 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -0,0 +1,161 @@ +/* Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2013 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include "cpsw.h" + +/* AM33xx SoC specific definitions for the CONTROL port */ +#define AM33XX_GMII_SEL_MODE_MII 0 +#define AM33XX_GMII_SEL_MODE_RMII 1 +#define AM33XX_GMII_SEL_MODE_RGMII 2 + +#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) +#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) + +struct cpsw_phy_sel_priv { + struct device *dev; + u32 __iomem *gmii_sel; + bool rmii_clock_external; + void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave); +}; + + +static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave) +{ + u32 reg; + u32 mask; + u32 mode = 0; + + reg = readl(priv->gmii_sel); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + mode = AM33XX_GMII_SEL_MODE_RMII; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + mode = AM33XX_GMII_SEL_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_MII: + default: + mode = AM33XX_GMII_SEL_MODE_MII; + break; + }; + + mask = 0x3 << (slave * 2) | BIT(slave + 6); + mode <<= slave * 2; + + if (priv->rmii_clock_external) { + if (slave == 0) + mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN; + else + mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN; + } + + reg &= ~mask; + reg |= mode; + + writel(reg, priv->gmii_sel); +} + +static struct platform_driver cpsw_phy_sel_driver; +static int match(struct device *dev, void *data) +{ + struct device_node *node = (struct device_node *)data; + return dev->of_node == node && + dev->driver == &cpsw_phy_sel_driver.driver; +} + +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) +{ + struct device_node *node; + struct cpsw_phy_sel_priv *priv; + + node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); + if (!node) { + dev_err(dev, "Phy mode driver DT not found\n"); + return; + } + + dev = bus_find_device(&platform_bus_type, NULL, node, match); + priv = dev_get_drvdata(dev); + + priv->cpsw_phy_sel(priv, phy_mode, slave); +} +EXPORT_SYMBOL_GPL(cpsw_phy_sel); + +static const struct 
of_device_id cpsw_phy_sel_id_table[] = { + { + .compatible = "ti,am3352-cpsw-phy-sel", + .data = &cpsw_gmii_sel_am3352, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); + +static int cpsw_phy_sel_probe(struct platform_device *pdev) +{ + struct resource *res; + const struct of_device_id *of_id; + struct cpsw_phy_sel_priv *priv; + + of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node); + if (!of_id) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n"); + return -ENOMEM; + } + + priv->cpsw_phy_sel = of_id->data; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); + priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->gmii_sel)) + return PTR_ERR(priv->gmii_sel); + + if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL)) + priv->rmii_clock_external = true; + + dev_set_drvdata(&pdev->dev, priv); + + return 0; +} + +static struct platform_driver cpsw_phy_sel_driver = { + .probe = cpsw_phy_sel_probe, + .driver = { + .name = "cpsw-phy-sel", + .owner = THIS_MODULE, + .of_match_table = cpsw_phy_sel_id_table, + }, +}; + +module_platform_driver(cpsw_phy_sel_driver); +MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 79974e31187..5120d9ce1dd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -367,8 +367,6 @@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; - struct resource *cpsw_res; - struct resource *cpsw_wr_res; struct napi_struct napi; struct device *dev; struct cpsw_platform_data data; @@ -639,13 +637,6 @@ void cpsw_rx_handler(void *token, int len, int status) static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; - u32 rx, tx, rx_thresh; - - rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat); - rx = __raw_readl(&priv->wr_regs->rx_stat); - tx = __raw_readl(&priv->wr_regs->tx_stat); - if (!rx_thresh && !rx && !tx) - return IRQ_NONE; cpsw_intr_disable(priv); if (priv->irq_enabled == true) { @@ -976,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries( priv->host_port, ALE_VLAN, slave->port_vlan); } -static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +static void soft_reset_slave(struct cpsw_slave *slave) { char name[32]; - u32 slave_port; - - sprintf(name, "slave-%d", slave->slave_num); + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); @@ -1023,6 +1019,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) dev_info(priv->dev, "phy found : id is : 0x%x\n", slave->phy->phy_id); phy_start(slave->phy); + + /* Configure GMII_SEL register */ + cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, + slave->slave_num); } } @@ -1151,6 +1151,12 @@ static int cpsw_ndo_open(struct net_device *ndev) * receive descs */ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); + + if (cpts_register(&priv->pdev->dev, priv->cpts, + priv->data.cpts_clock_mult, + priv->data.cpts_clock_shift)) + dev_err(priv->dev, "error registering cpts device\n"); + } /* 
Enable Interrupt pacing if configured */ @@ -1169,9 +1175,9 @@ static int cpsw_ndo_open(struct net_device *ndev) } } + napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - napi_enable(&priv->napi); cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); @@ -1197,6 +1203,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_carrier_off(priv->ndev); if (cpsw_common_res_usage_state(priv) <= 1) { + cpts_unregister(priv->cpts); cpsw_intr_disable(priv); cpdma_ctlr_int_ctrl(priv->dma, false); cpdma_ctlr_stop(priv->dma); @@ -1328,6 +1335,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2) + return -EOPNOTSUPP; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1335,16 +1346,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (cfg.flags) return -EINVAL; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - cpts->tx_enable = 0; - break; - case HWTSTAMP_TX_ON: - cpts->tx_enable = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -1371,6 +1374,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + switch (priv->version) { case CPSW_VERSION_1: cpsw_hwtstamp_v1(priv); @@ -1379,7 +1384,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) cpsw_hwtstamp_v2(priv); break; default: - return -ENOTSUPP; + WARN_ON(1); } return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; @@ -1712,67 +1717,60 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (of_property_read_u32(node, "active_slave", &prop)) { pr_err("Missing active_slave property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->cpts_clock_mult = prop; if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { pr_err("Missing cpts_clock_shift property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->cpts_clock_shift = prop; - data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data), - GFP_KERNEL); + data->slave_data = devm_kzalloc(&pdev->dev, data->slaves + * sizeof(struct cpsw_slave_data), + GFP_KERNEL); if (!data->slave_data) - return -EINVAL; + return -ENOMEM; if (of_property_read_u32(node, "cpdma_channels", &prop)) { pr_err("Missing cpdma_channels property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->channels = prop; if (of_property_read_u32(node, "ale_entries", &prop)) { pr_err("Missing ale_entries property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->ale_entries = prop; if (of_property_read_u32(node, "bd_ram_size", &prop)) { pr_err("Missing bd_ram_size property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->bd_ram_size = prop; if (of_property_read_u32(node, "rx_descs", &prop)) { pr_err("Missing rx_descs property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->rx_descs = prop; if (of_property_read_u32(node, "mac_control", &prop)) { pr_err("Missing mac_control property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } data->mac_control = prop; - if (!of_property_read_u32(node, "dual_emac", &prop)) - data->dual_emac = prop; + if (of_property_read_bool(node, "dual_emac")) + data->dual_emac = 1; /* * Populate all the child nodes here... 
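The cpsw_probe_dt() hunks above drop the kcalloc()/kfree() pair and the error_ret unwind label in favor of a device-managed allocation, so each failing property lookup can simply return its error code. A minimal sketch of that pattern (hypothetical example_probe_dt(), not code from this patch):

static int example_probe_dt(struct platform_device *pdev, int nslaves)
{
	struct cpsw_slave_data *slave_data;

	/* devm memory is freed automatically when the device is unbound
	 * or when probe fails, so no kfree() or goto-based unwind is
	 * needed on the error paths that follow.
	 */
	slave_data = devm_kzalloc(&pdev->dev,
				  nslaves * sizeof(*slave_data),
				  GFP_KERNEL);
	if (!slave_data)
		return -ENOMEM;

	/* Any later property-lookup failure may now return directly. */
	return 0;
}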
@@ -1782,7 +1780,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (ret) pr_warn("Doesn't have any child node\n"); - for_each_node_by_name(slave_node, "slave") { + for_each_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; u32 phyid; @@ -1791,11 +1789,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct device_node *mdio_node; struct platform_device *mdio; + /* This is not a slave child node, continue */ + if (strcmp(slave_node->name, "slave")) + continue; + parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { pr_err("Missing slave[%d] phy_id property\n", i); - ret = -EINVAL; - goto error_ret; + return -EINVAL; } mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); @@ -1822,13 +1823,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } i++; + if (i == data->slaves) + break; } return 0; - -error_ret: - kfree(data->slave_data); - return ret; } static int cpsw_probe_dual_emac(struct platform_device *pdev, @@ -1870,7 +1869,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->coal_intvl = 0; priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; - priv_sl2->cpsw_res = priv->cpsw_res; priv_sl2->regs = priv->regs; priv_sl2->host_port = priv->host_port; priv_sl2->host_port_regs = priv->host_port_regs; @@ -1914,8 +1912,8 @@ static int cpsw_probe(struct platform_device *pdev) struct cpsw_priv *priv; struct cpdma_params dma_params; struct cpsw_ale_params ale_params; - void __iomem *ss_regs, *wr_regs; - struct resource *res; + void __iomem *ss_regs; + struct resource *res, *ss_res; u32 slave_offset, sliver_offset, slave_size; int ret = 0, i, k = 0; @@ -1951,7 +1949,7 @@ static int cpsw_probe(struct platform_device *pdev) if (cpsw_probe_dt(&priv->data, pdev)) { pr_err("cpsw: platform data missing\n"); ret = -ENODEV; - goto clean_ndev_ret; + goto clean_runtime_disable_ret; } data = &priv->data; @@ -1965,11 +1963,12 @@ static int cpsw_probe(struct platform_device *pdev) memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); - priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves, - GFP_KERNEL); + priv->slaves = devm_kzalloc(&pdev->dev, + sizeof(struct cpsw_slave) * data->slaves, + GFP_KERNEL); if (!priv->slaves) { - ret = -EBUSY; - goto clean_ndev_ret; + ret = -ENOMEM; + goto clean_runtime_disable_ret; } for (i = 0; i < data->slaves; i++) priv->slaves[i].slave_num = i; @@ -1977,55 +1976,37 @@ static int cpsw_probe(struct platform_device *pdev) priv->slaves[0].ndev = ndev; priv->emac_port = 0; - priv->clk = clk_get(&pdev->dev, "fck"); + priv->clk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "fck is not found\n"); + dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; - goto clean_slave_ret; + goto clean_runtime_disable_ret; } priv->coal_intvl = 0; priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; - priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!priv->cpsw_res) { - dev_err(priv->dev, "error getting i/o resource\n"); - ret = -ENOENT; - goto clean_clk_ret; - } - if (!request_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res), ndev->name)) { - dev_err(priv->dev, "failed request i/o region\n"); - ret = -ENXIO; - goto clean_clk_ret; - } - ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res)); - if (!ss_regs) { - dev_err(priv->dev, "unable to map i/o region\n"); - goto 
clean_cpsw_iores_ret; + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); + if (IS_ERR(ss_regs)) { + ret = PTR_ERR(ss_regs); + goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->version = __raw_readl(&priv->regs->id_ver); priv->host_port = HOST_PORT_NUM; - priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!priv->cpsw_wr_res) { - dev_err(priv->dev, "error getting i/o resource\n"); - ret = -ENOENT; - goto clean_iomap_ret; - } - if (!request_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res), ndev->name)) { - dev_err(priv->dev, "failed request i/o region\n"); - ret = -ENXIO; - goto clean_iomap_ret; - } - wr_regs = ioremap(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); - if (!wr_regs) { - dev_err(priv->dev, "unable to map i/o region\n"); - goto clean_cpsw_wr_iores_ret; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + pm_runtime_get_sync(&pdev->dev); + priv->version = readl(&priv->regs->id_ver); + pm_runtime_put_sync(&pdev->dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->wr_regs)) { + ret = PTR_ERR(priv->wr_regs); + goto clean_runtime_disable_ret; } - priv->wr_regs = wr_regs; memset(&dma_params, 0, sizeof(dma_params)); memset(&ale_params, 0, sizeof(ale_params)); @@ -2056,12 +2037,12 @@ static int cpsw_probe(struct platform_device *pdev) slave_size = CPSW2_SLAVE_SIZE; sliver_offset = CPSW2_SLIVER_OFFSET; dma_params.desc_mem_phys = - (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET; + (u32 __force) ss_res->start + CPSW2_BD_OFFSET; break; default: dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); ret = -ENODEV; - goto clean_cpsw_wr_iores_ret; + goto clean_runtime_disable_ret; } for (i = 0; i < priv->data.slaves; i++) { struct cpsw_slave *slave = &priv->slaves[i]; @@ -2089,7 +2070,7 @@ static int cpsw_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; - goto clean_wr_iomap_ret; + goto clean_runtime_disable_ret; } priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), @@ -2124,8 +2105,8 @@ static int cpsw_probe(struct platform_device *pdev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (request_irq(i, cpsw_interrupt, 0, - dev_name(&pdev->dev), priv)) { + if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, + dev_name(priv->dev), priv)) { dev_err(priv->dev, "error attaching irq\n"); goto clean_ale_ret; } @@ -2147,7 +2128,7 @@ static int cpsw_probe(struct platform_device *pdev) if (ret) { dev_err(priv->dev, "error registering net device\n"); ret = -ENODEV; - goto clean_irq_ret; + goto clean_ale_ret; } if (cpts_register(&pdev->dev, priv->cpts, @@ -2155,44 +2136,27 @@ static int cpsw_probe(struct platform_device *pdev) dev_err(priv->dev, "error registering cpts device\n"); cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", - priv->cpsw_res->start, ndev->irq); + ss_res->start, ndev->irq); if (priv->data.dual_emac) { ret = cpsw_probe_dual_emac(pdev, priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_irq_ret; + goto clean_ale_ret; } } return 0; -clean_irq_ret: - for (i = 0; i < priv->num_irqs; i++) - free_irq(priv->irqs_table[i], priv); clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: cpdma_chan_destroy(priv->txch); 
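	/* With the memory regions, clock and IRQs above converted to
	 * their devm_* equivalents, the unwind labels removed just below
	 * are no longer needed: the device core releases managed
	 * resources itself, leaving only the ALE/DMA teardown and
	 * pm_runtime_disable() on the error path.
	 */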
cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); -clean_wr_iomap_ret: - iounmap(priv->wr_regs); -clean_cpsw_wr_iores_ret: - release_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); -clean_iomap_ret: - iounmap(priv->regs); -clean_cpsw_iores_ret: - release_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res)); -clean_clk_ret: - clk_put(priv->clk); -clean_slave_ret: +clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); - kfree(priv->slaves); clean_ndev_ret: - kfree(priv->data.slave_data); free_netdev(priv->ndev); return ret; } @@ -2201,30 +2165,16 @@ static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - int i; if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); - cpts_unregister(priv->cpts); - for (i = 0; i < priv->num_irqs; i++) - free_irq(priv->irqs_table[i], priv); - cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); - iounmap(priv->regs); - release_mem_region(priv->cpsw_res->start, - resource_size(priv->cpsw_res)); - iounmap(priv->wr_regs); - release_mem_region(priv->cpsw_wr_res->start, - resource_size(priv->cpsw_wr_res)); pm_runtime_disable(&pdev->dev); - clk_put(priv->clk); - kfree(priv->slaves); - kfree(priv->data.slave_data); if (priv->data.dual_emac) free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); @@ -2239,8 +2189,9 @@ static int cpsw_suspend(struct device *dev) if (netif_running(ndev)) cpsw_ndo_stop(ndev); - soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); - soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); + + for_each_slave(priv, soft_reset_slave); + pm_runtime_put_sync(&pdev->dev); /* Select sleep pin state */ @@ -2280,7 +2231,7 @@ static struct platform_driver cpsw_driver = { .name = "cpsw", .owner = THIS_MODULE, .pm = &cpsw_pm_ops, - .of_match_table = of_match_ptr(cpsw_of_mtable), + .of_match_table = cpsw_of_mtable, }, .probe = cpsw_probe, .remove = cpsw_remove, diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index eb3e101ec04..574f49da693 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -39,4 +39,6 @@ struct cpsw_platform_data { bool dual_emac; /* Enable Dual EMAC mode */ }; +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); + #endif /* __CPSW_H__ */ diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index fe993cdd7e2..1a581ef7eee 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h @@ -127,8 +127,8 @@ struct cpts { }; #ifdef CONFIG_TI_CPTS -extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); -extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); +void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); +void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); #else static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) { @@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb) } #endif -extern int cpts_register(struct device *dev, struct cpts *cpts, - u32 mult, u32 shift); -extern void cpts_unregister(struct cpts *cpts); +int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift); +void cpts_unregister(struct cpts *cpts); #endif diff --git a/drivers/net/ethernet/ti/davinci_emac.c 
b/drivers/net/ethernet/ti/davinci_emac.c index 67df09ea9d0..cd9b164a043 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -61,6 +61,7 @@ #include <linux/davinci_emac.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -876,8 +877,7 @@ static void emac_dev_mcast_set(struct net_device *ndev) netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); - } - if (!netdev_mc_empty(ndev)) { + } else if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); @@ -1753,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; +static const struct of_device_id davinci_emac_of_match[]; + static struct emac_platform_data * davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; + const struct of_device_id *match; + const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; @@ -1794,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) - pdata->phy_id = ""; + pdata->phy_id = NULL; + + auxdata = pdev->dev.platform_data; + if (auxdata) { + pdata->interrupt_enable = auxdata->interrupt_enable; + pdata->interrupt_disable = auxdata->interrupt_disable; + } + + match = of_match_device(davinci_emac_of_match, &pdev->dev); + if (match && match->data) { + auxdata = match->data; + pdata->version = auxdata->version; + pdata->hw_ram_addr = auxdata->hw_ram_addr; + } pdev->dev.platform_data = pdata; @@ -1853,7 +1870,7 @@ static int davinci_emac_probe(struct platform_device *pdev) } /* MAC addr and PHY mask , RMII enable info from platform_data */ - memcpy(priv->mac_addr, pdata->mac_addr, 6); + memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN); priv->phy_id = pdata->phy_id; priv->rmii_en = pdata->rmii_en; priv->version = pdata->version; @@ -2021,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { }; #if IS_ENABLED(CONFIG_OF) +static const struct emac_platform_data am3517_emac_data = { + .version = EMAC_VERSION_2, + .hw_ram_addr = 0x01e20000, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, + {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 591437e59b9..62b19be5183 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev) free_netdev(dev); - pci_set_drvdata(pdev, NULL); cancel_work_sync(&priv->tlan_tqueue); } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 13e6fff8ca2..628b736e5ae 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) nz_addr |= mac[i]; if (nz_addr) { - memcpy(dev->dev_addr, mac, 6); + memcpy(dev->dev_addr, mac, ETH_ALEN); dev->addr_len = 6; } else { eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 106be47716e..edb2e12a0fe 100644 --- 
a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1008,6 +1008,8 @@ static void tile_net_register(void *dev_ptr) info->egress_timer.data = (long)info; info->egress_timer.function = tile_net_handle_egress_timer; + u64_stats_init(&info->stats.syncp); + priv->cpu[my_cpu] = info; /* diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 309abb472aa..8505196be9f 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port) } #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC -extern void udbg_shutdown_ps3gelic(void); +void udbg_shutdown_ps3gelic(void); #else static inline void udbg_shutdown_ps3gelic(void) {} #endif -extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); +int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); /* shared netdev ops */ -extern void gelic_card_up(struct gelic_card *card); -extern void gelic_card_down(struct gelic_card *card); -extern int gelic_net_open(struct net_device *netdev); -extern int gelic_net_stop(struct net_device *netdev); -extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); -extern void gelic_net_set_multi(struct net_device *netdev); -extern void gelic_net_tx_timeout(struct net_device *netdev); -extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); -extern int gelic_net_setup_netdev(struct net_device *netdev, - struct gelic_card *card); +void gelic_card_up(struct gelic_card *card); +void gelic_card_down(struct gelic_card *card); +int gelic_net_open(struct net_device *netdev); +int gelic_net_stop(struct net_device *netdev); +int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +void gelic_net_set_multi(struct net_device *netdev); +void gelic_net_tx_timeout(struct net_device *netdev); +int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); +int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card); /* shared ethtool ops */ -extern void gelic_net_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info); -extern void gelic_net_poll_controller(struct net_device *netdev); +void gelic_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info); +void gelic_net_poll_controller(struct net_device *netdev); #endif /* _GELIC_NET_H */ diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h index f7e51b7d704..11f443d8e4e 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h @@ -320,7 +320,7 @@ struct gelic_eurus_cmd { #define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0) #define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1) -extern int gelic_wl_driver_probe(struct gelic_card *card); -extern int gelic_wl_driver_remove(struct gelic_card *card); -extern void gelic_wl_interrupt(struct net_device *netdev, u64 status); +int gelic_wl_driver_probe(struct gelic_card *card); +int gelic_wl_driver_remove(struct gelic_card *card); +void gelic_wl_interrupt(struct net_device *netdev, u64 status); #endif /* _GELIC_WIRELESS_H */ diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 5734480c1ec..3f4a32e39d2 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2478,7 +2478,6 @@ out_release_regions: pci_release_regions(pdev); 
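	/* The pci_set_drvdata(pdev, NULL) calls removed here and in the
	 * other PCI drivers in this series are redundant: the driver core
	 * already clears drvdata when a device is unbound or its probe
	 * fails, so drivers no longer have to reset it by hand.
	 */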
out_disable_dev: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return NULL; } diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h index 4ba2135474d..9b6af0845a1 100644 --- a/drivers/net/ethernet/toshiba/spider_net.h +++ b/drivers/net/ethernet/toshiba/spider_net.h @@ -29,8 +29,8 @@ #include <linux/sungem_phy.h> -extern int spider_net_stop(struct net_device *netdev); -extern int spider_net_open(struct net_device *netdev); +int spider_net_stop(struct net_device *netdev); +int spider_net_open(struct net_device *netdev); extern const struct ethtool_ops spider_net_ethtool_ops; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index a971b9cca56..1322546d92a 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev) mdiobus_free(lp->mii_bus); unregister_netdev(dev); free_netdev(dev); - pci_set_drvdata(pdev, NULL); } static int diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index bdf697b184a..cce6c4bc556 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -987,6 +987,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rp->base = ioaddr; + u64_stats_init(&rp->tx_stats.syncp); + u64_stats_init(&rp->rx_stats.syncp); + /* Get chip registers into a sane state */ rhine_power_init(dev); rhine_hw_init(dev, pioaddr); @@ -2292,7 +2295,6 @@ static void rhine_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void rhine_shutdown (struct pci_dev *pdev) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d022bf93657..ad61d26a44f 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget) unsigned int rx_done; unsigned long flags; - spin_lock_irqsave(&vptr->lock, flags); /* * Do rx and tx twice for performance (taken from the VIA * out-of-tree driver). 
*/ - rx_done = velocity_rx_srv(vptr, budget / 2); - velocity_tx_srv(vptr); - rx_done += velocity_rx_srv(vptr, budget - rx_done); + rx_done = velocity_rx_srv(vptr, budget); + spin_lock_irqsave(&vptr->lock, flags); velocity_tx_srv(vptr); - /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { napi_complete(napi); @@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out_free_tmp_vptr_1; + napi_disable(&vptr->napi); + spin_lock_irqsave(&vptr->lock, flags); netif_stop_queue(dev); @@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) velocity_give_many_rx_descs(vptr); + napi_enable(&vptr->napi); + mac_enable_int(vptr->mac_regs); netif_start_queue(dev); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 0029148077a..2166e879a09 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -36,6 +36,7 @@ #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/of_address.h> @@ -1016,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op) platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &temac_netdev_ops; ndev->ethtool_ops = &temac_ethtool_ops; #if 0 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2ff038d6d2..f9293da19e2 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op) SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &axienet_netdev_ops; ndev->ethtool_ops = &axienet_ethtool_ops; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 4c619ea5189..fefb8cd5eb6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -31,7 +31,7 @@ #define DRIVER_NAME "xilinx_emaclite" /* Register offsets for the EmacLite Core */ -#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ +#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ #define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */ #define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */ #define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */ @@ -63,13 +63,13 @@ #define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */ /* Global Interrupt Enable Register (GIER) Bit Masks */ -#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ +#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ /* Transmit Status Register (TSR) Bit Masks */ -#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ -#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ -#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ -#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit +#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ +#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ +#define 
XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ +#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented * in the HW spec */ @@ -77,21 +77,21 @@ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) /* Receive Status Register (RSR) */ -#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ -#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ +#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ +#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ /* Transmit Packet Length Register (TPLR) */ -#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ +#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ /* Receive Packet Length Register (RPLR) */ -#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ +#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ -#define XEL_HEADER_OFFSET 12 /* Offset to length field */ -#define XEL_HEADER_SHIFT 16 /* Shift value for length */ +#define XEL_HEADER_OFFSET 12 /* Offset to length field */ +#define XEL_HEADER_SHIFT 16 /* Shift value for length */ /* General Ethernet Definitions */ -#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ -#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ +#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ +#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ @@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, drvdata->base_addr + XEL_TSR_OFFSET); - /* Enable the Tx interrupts for the second Buffer if - * configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Enable the Rx interrupts for the first buffer */ __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); - /* Enable the Rx interrupts for the second Buffer if - * configured in HW */ - if (drvdata->rx_ping_pong != 0) { - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); - } - /* Enable the Global Interrupt Enable */ __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } @@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), drvdata->base_addr + XEL_TSR_OFFSET); - /* Disable the Tx interrupts for the second Buffer - * if configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Disable the Rx interrupts for the first buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), drvdata->base_addr + XEL_RSR_OFFSET); - - /* Disable the Rx interrupts for the second buffer - * if configured in HW */ - if (drvdata->rx_ping_pong != 0) { - - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - } } /** @@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = 
*from_u16_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); + /* Output a word */ *to_u32_ptr++ = align_buffer; } @@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, for (; length > 0; length--) *to_u8_ptr++ = *from_u8_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); *to_u32_ptr = align_buffer; } } @@ -1075,14 +1050,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) * This function unmaps the IO region of the Emaclite device and frees the net * device. */ -static void xemaclite_remove_ndev(struct net_device *ndev, - struct platform_device *pdev) +static void xemaclite_remove_ndev(struct net_device *ndev) { if (ndev) { - struct net_local *lp = netdev_priv(ndev); - - if (lp->base_addr) - devm_iounmap(&pdev->dev, lp->base_addr); free_netdev(ndev); } } @@ -1177,7 +1147,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (mac_address) /* Set the MAC address. */ - memcpy(ndev->dev_addr, mac_address, 6); + memcpy(ndev->dev_addr, mac_address, ETH_ALEN); else dev_warn(dev, "No MAC address found\n"); @@ -1214,7 +1184,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) return 0; error: - xemaclite_remove_ndev(ndev, ofdev); + xemaclite_remove_ndev(ndev); return rc; } @@ -1248,7 +1218,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - xemaclite_remove_ndev(ndev, of_dev); + xemaclite_remove_ndev(ndev); return 0; } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e78802e75ea..bcc224a8373 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ch = PORT2CHANNEL(port); regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - port->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - port->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. */ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
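Both the cpsw and ixp4xx hwtstamp hunks above converge on the same shape: reject an unknown tx_type up front, run the rx_filter switch, and only then commit the tx enable as a boolean. A minimal sketch of that validate-then-commit flow, using a hypothetical example_port structure (not a drop-in for either driver):

#include <linux/errno.h>
#include <linux/net_tstamp.h>

struct example_port {
	int hwts_tx_en;
	int hwts_rx_en;
};

static int example_hwtstamp_set(struct example_port *port,
				struct hwtstamp_config *cfg)
{
	/* Validate tx_type before touching any state. */
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		port->hwts_rx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	/* Commit the tx side only after every check has passed, so a
	 * failed request leaves the previous configuration untouched.
	 */
	port->hwts_tx_en = (cfg->tx_type == HWTSTAMP_TX_ON);
	return 0;
}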