From 916c775ff297dc60219a4f0e5527ba6ab4a88ed4 Mon Sep 17 00:00:00 2001
From: Eilon Greenstein
Date: Mon, 9 Mar 2009 00:52:14 +0000
Subject: bnx2x: Adding restriction on sge_buf_size

sge_buff_size must not exceed 0xffff.

Reported-by: Bjorn Helgaas
Signed-off-by: Vladislav Zolotarov
Tested-by: Bjorn Helgaas
Signed-off-by: Eilon Greenstein
Signed-off-by: David S. Miller
---
 drivers/net/bnx2x_main.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index d3e7775a9cc..48127f11d03 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -4518,7 +4518,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
 		 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
 		context->ustorm_st_context.common.sge_buff_size =
-			(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
+			(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
+				 (u32)0xffff);
 		context->ustorm_st_context.common.sge_page_base_hi =
 			U64_HI(fp->rx_sge_mapping);
 		context->ustorm_st_context.common.sge_page_base_lo =
-- cgit v1.2.3-70-g09d2

From 6dc7d8c843024c2636cf52d3f93047acbcd765f2 Mon Sep 17 00:00:00 2001
From: Eilon Greenstein
Date: Mon, 9 Mar 2009 00:52:17 +0000
Subject: bnx2x: Casting page alignment

Add a proper cast to the argument of the PAGE_ALIGN macro so that the
result does not depend on the argument's original type. Without this
cast, the aligned value is truncated to the size of the argument's type.

Reported-by: Bjorn Helgaas
Signed-off-by: Vladislav Zolotarov
Tested-by: Bjorn Helgaas
Signed-off-by: Eilon Greenstein
Signed-off-by: David S. Miller
---
 drivers/net/bnx2x.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 15a5cf0f676..3cf2b92eef3 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -152,7 +152,7 @@ struct sw_rx_page {
 #define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
 #define SGE_PAGE_SIZE		PAGE_SIZE
 #define SGE_PAGE_SHIFT		PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN(addr)
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))addr)

 #define BCM_RX_ETH_PAYLOAD_ALIGN	64
-- cgit v1.2.3-70-g09d2
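The truncation the cast above guards against is easy to reproduce in
isolation: if page alignment is computed in, or assigned back to, a type
narrower than the aligned result, the carry out of the top bit is silently
lost. A minimal user-space sketch (illustrative only; ALIGN_UP is a
hand-expanded stand-in for the kernel's PAGE_ALIGN(), and a 4 KiB page size
is assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Hand-expanded stand-in for the kernel's PAGE_ALIGN(): round up. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	uint16_t len = 0xf001;	/* fits in 16 bits */

	/* Storing the aligned value back into a 16-bit type drops the
	 * carry into bit 16: 0x10000 truncates to 0x0000. */
	uint16_t bad = (uint16_t)ALIGN_UP(len, PAGE_SIZE);
	unsigned long good = ALIGN_UP((unsigned long)len, PAGE_SIZE);

	printf("bad=%#x good=%#lx\n", (unsigned)bad, good);
	/* prints: bad=0 good=0x10000 */
	return 0;
}

Forcing the argument to typeof(PAGE_SIZE), as the patch does, makes the
whole expression evaluate in a type at least as wide as the page size.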
From db434ac6bff0d991d0b60166dc9d6405b873d0f7 Mon Sep 17 00:00:00 2001
From: Eilon Greenstein
Date: Mon, 9 Mar 2009 00:52:21 +0000
Subject: bnx2x: Using DMAE to initialize the chip

This fixes a bug which occasionally caused PRAM initialization to fail
after a cold boot. Also increment the version number to 1.45.27.

Signed-off-by: Vladislav Zolotarov
Signed-off-by: Eilon Greenstein
Signed-off-by: David S. Miller
---
 drivers/net/bnx2x_init.h |  4 ----
 drivers/net/bnx2x_main.c | 18 +++++++++---------
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index a6c0b3abba2..3b0c2499ef1 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -150,7 +150,6 @@ static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,

 static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
 {
-#ifdef USE_DMAE
 	int offset = 0;

 	if (bp->dmae_ready) {
@@ -164,9 +163,6 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
 			       addr + offset, len);
 	} else
 		bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
-#else
-	bnx2x_init_str_wr(bp, addr, bp->gunzip_buf, len);
-#endif
 }

 static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 48127f11d03..2e346a5e98c 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,7 +57,7 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"

-#define DRV_MODULE_VERSION	"1.45.26"
+#define DRV_MODULE_VERSION	"1.45.27"
 #define DRV_MODULE_RELDATE	"2009/01/26"
 #define BNX2X_BC_VER		0x040200
@@ -4035,10 +4035,10 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
 {
 	int port = BP_PORT(bp);

-	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
+	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
 			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
 			sizeof(struct ustorm_status_block)/4);
-	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
+	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
 			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
 			sizeof(struct cstorm_status_block)/4);
 }
@@ -4092,18 +4092,18 @@ static void bnx2x_zero_def_sb(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);

-	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
+	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
+			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
+			sizeof(struct tstorm_def_status_block)/4);
+	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
 			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
 			sizeof(struct ustorm_def_status_block)/4);
-	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
+	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
 			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
 			sizeof(struct cstorm_def_status_block)/4);
-	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
+	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
 			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
 			sizeof(struct xstorm_def_status_block)/4);
-	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
-			sizeof(struct tstorm_def_status_block)/4);
 }
-- cgit v1.2.3-70-g09d2

From ff8cf9a93800e8118ea097c1aba7203d59a0f3f1 Mon Sep 17 00:00:00 2001
From: John Dykstra
Date: Wed, 11 Mar 2009 09:22:51 -0700
Subject: ipv6: Fix BUG when disabled ipv6 module is unloaded

Do not try to "uninitialize" ipv6 if its initialization had been skipped
because module parameter disable=1 had been specified.

Reported-by: Thomas Backlund
Signed-off-by: John Dykstra
Acked-by: Brian Haley
Signed-off-by: David S. Miller
---
 net/ipv6/af_inet6.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index da944eca2ca..9c8309ed35c 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1192,6 +1192,9 @@ module_init(inet6_init);

 static void __exit inet6_exit(void)
 {
+	if (disable_ipv6)
+		return;
+
 	/* First of all disallow new sockets creation. */
 	sock_unregister(PF_INET6);
 	/* Disallow any further netlink messages */
-- cgit v1.2.3-70-g09d2
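The fix illustrates a general rule: a module's exit path must mirror every
conditional skip in its init path, or unloading will tear down state that
was never set up. A minimal sketch of the pattern (a hypothetical module
named foo, not the af_inet6 code itself):

#include <linux/init.h>
#include <linux/module.h>

static int disable;		/* set with foo.disable=1 at load time */
module_param(disable, int, 0444);

static bool initialized;	/* remember whether init really ran */

static int __init foo_init(void)
{
	if (disable)
		return 0;	/* stay loaded, but deliberately inert */
	/* ... register protocols, sockets, etc. ... */
	initialized = true;
	return 0;
}

static void __exit foo_exit(void)
{
	if (!initialized)
		return;		/* nothing was set up; tear nothing down */
	/* ... unregister in reverse order ... */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The af_inet6 patch gets the same effect by testing the disable_ipv6
parameter directly, since inet6_init() returns early for exactly that
condition.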
From 4796417417a62e2ae83d92cb92e1ecf9ec67b5f5 Mon Sep 17 00:00:00 2001
From: Ilya Yanok
Date: Wed, 11 Mar 2009 23:26:02 -0700
Subject: dnet: Dave DNET ethernet controller driver (updated)

Driver for the Dave DNET ethernet controller found on the Dave/DENX
QongEVB-LITE FPGA board. Heavily based on Dave sources; I have just
adapted it to the current kernel version and done some code cleanup.

Signed-off-by: Ilya Yanok
Signed-off-by: David S. Miller
---
 drivers/net/Kconfig  |  11 +
 drivers/net/Makefile |   1 +
 drivers/net/dnet.c   | 994 +++++++++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/dnet.h   | 225 ++++++++++++
 4 files changed, 1231 insertions(+)
 create mode 100644 drivers/net/dnet.c
 create mode 100644 drivers/net/dnet.h

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a2f185fd707..5c28b06dac6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1040,6 +1040,17 @@ config NI65
 	  To compile this driver as a module, choose M here. The module
 	  will be called ni65.

+config DNET
+	tristate "Dave ethernet support (DNET)"
+	depends on NET_ETHERNET
+	select PHYLIB
+	help
+	  The Dave ethernet interface (DNET) is found on Qong Board FPGA.
+	  Say Y to include support for the DNET chip.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called dnet.
+
 source "drivers/net/tulip/Kconfig"

 config AT1700
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index aca8492db65..6d9bba58b9a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -231,6 +231,7 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o

 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o

+obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_MACB) += macb.o

 obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
new file mode 100644
index 00000000000..92c3bd3a472
--- /dev/null
+++ b/drivers/net/dnet.c
@@ -0,0 +1,994 @@
+/*
+ * Dave DNET Ethernet Controller driver
+ *
+ * Copyright (C) 2008 Dave S.r.l.
+ * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd,
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dnet.h" + +#undef DEBUG + +/* function for reading internal MAC register */ +u16 dnet_readw_mac(struct dnet *bp, u16 reg) +{ + u16 data_read; + + /* issue a read */ + dnet_writel(bp, reg, MACREG_ADDR); + + /* since a read/write op to the MAC is very slow, + * we must wait before reading the data */ + ndelay(500); + + /* read data read from the MAC register */ + data_read = dnet_readl(bp, MACREG_DATA); + + /* all done */ + return data_read; +} + +/* function for writing internal MAC register */ +void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) +{ + /* load data to write */ + dnet_writel(bp, val, MACREG_DATA); + + /* issue a write */ + dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); + + /* since a read/write op to the MAC is very slow, + * we must wait before exiting */ + ndelay(500); +} + +static void __dnet_set_hwaddr(struct dnet *bp) +{ + u16 tmp; + + tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr)); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); + tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2))); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); + tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4))); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); +} + +static void __devinit dnet_get_hwaddr(struct dnet *bp) +{ + u16 tmp; + u8 addr[6]; + + /* + * from MAC docs: + * "Note that the MAC address is stored in the registers in Hexadecimal + * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 + * would require writing 0xAC (octet 0) to address 0x0B (high byte of + * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of + * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of + * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of + * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of + * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of + * Mac_addr[15:0]). 
+ */ + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); + *((u16 *) addr) = be16_to_cpu(tmp); + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); + *((u16 *) (addr + 2)) = be16_to_cpu(tmp); + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); + *((u16 *) (addr + 4)) = be16_to_cpu(tmp); + + if (is_valid_ether_addr(addr)) + memcpy(bp->dev->dev_addr, addr, sizeof(addr)); +} + +static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct dnet *bp = bus->priv; + u16 value; + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + /* only 5 bits allowed for phy-addr and reg_offset */ + mii_id &= 0x1f; + regnum &= 0x1f; + + /* prepare reg_value for a read */ + value = (mii_id << 8); + value |= regnum; + + /* write control word */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); + + /* wait for end of transfer */ + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); + + pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); + + return value; +} + +static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct dnet *bp = bus->priv; + u16 tmp; + + pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + /* prepare for a write operation */ + tmp = (1 << 13); + + /* only 5 bits allowed for phy-addr and reg_offset */ + mii_id &= 0x1f; + regnum &= 0x1f; + + /* only 16 bits on data */ + value &= 0xffff; + + /* prepare reg_value for a write */ + tmp |= (mii_id << 8); + tmp |= regnum; + + /* write data to write first */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); + + /* write control word */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + return 0; +} + +static int dnet_mdio_reset(struct mii_bus *bus) +{ + return 0; +} + +static void dnet_handle_link_change(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + unsigned long flags; + u32 mode_reg, ctl_reg; + + int status_change = 0; + + spin_lock_irqsave(&bp->lock, flags); + + mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); + ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); + + if (phydev->link) { + if (bp->duplex != phydev->duplex) { + if (phydev->duplex) + ctl_reg &= + ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); + else + ctl_reg |= + DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; + + bp->duplex = phydev->duplex; + status_change = 1; + } + + if (bp->speed != phydev->speed) { + status_change = 1; + switch (phydev->speed) { + case 1000: + mode_reg |= DNET_INTERNAL_MODE_GBITEN; + break; + case 100: + case 10: + mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; + break; + default: + printk(KERN_WARNING + "%s: Ack! 
Speed (%d) is not " + "10/100/1000!\n", dev->name, + phydev->speed); + break; + } + bp->speed = phydev->speed; + } + } + + if (phydev->link != bp->link) { + if (phydev->link) { + mode_reg |= + (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); + } else { + mode_reg &= + ~(DNET_INTERNAL_MODE_RXEN | + DNET_INTERNAL_MODE_TXEN); + bp->speed = 0; + bp->duplex = -1; + } + bp->link = phydev->link; + + status_change = 1; + } + + if (status_change) { + dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); + dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); + } + + spin_unlock_irqrestore(&bp->lock, flags); + + if (status_change) { + if (phydev->link) + printk(KERN_INFO "%s: link up (%d/%s)\n", + dev->name, phydev->speed, + DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); + else + printk(KERN_INFO "%s: link down\n", dev->name); + } +} + +static int dnet_mii_probe(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + /* find the first phy */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (bp->mii_bus->phy_map[phy_addr]) { + phydev = bp->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENODEV; + } + + /* TODO : add pin_irq */ + + /* attach the mac to the phy */ + if (bp->capabilities & DNET_HAS_RMII) { + phydev = phy_connect(dev, phydev->dev.bus_id, + &dnet_handle_link_change, 0, + PHY_INTERFACE_MODE_RMII); + } else { + phydev = phy_connect(dev, phydev->dev.bus_id, + &dnet_handle_link_change, 0, + PHY_INTERFACE_MODE_MII); + } + + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + if (bp->capabilities & DNET_HAS_GIGABIT) + phydev->supported &= PHY_GBIT_FEATURES; + else + phydev->supported &= PHY_BASIC_FEATURES; + + phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; + + phydev->advertising = phydev->supported; + + bp->link = 0; + bp->speed = 0; + bp->duplex = -1; + bp->phy_dev = phydev; + + return 0; +} + +static int dnet_mii_init(struct dnet *bp) +{ + int err, i; + + bp->mii_bus = mdiobus_alloc(); + if (bp->mii_bus == NULL) + return -ENOMEM; + + bp->mii_bus->name = "dnet_mii_bus"; + bp->mii_bus->read = &dnet_mdio_read; + bp->mii_bus->write = &dnet_mdio_write; + bp->mii_bus->reset = &dnet_mdio_reset; + + snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); + + bp->mii_bus->priv = bp; + + bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!bp->mii_bus->irq) { + err = -ENOMEM; + goto err_out; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus->irq[i] = PHY_POLL; + + platform_set_drvdata(bp->dev, bp->mii_bus); + + if (mdiobus_register(bp->mii_bus)) { + err = -ENXIO; + goto err_out_free_mdio_irq; + } + + if (dnet_mii_probe(bp->dev) != 0) { + err = -ENXIO; + goto err_out_unregister_bus; + } + + return 0; + +err_out_unregister_bus: + mdiobus_unregister(bp->mii_bus); +err_out_free_mdio_irq: + kfree(bp->mii_bus->irq); +err_out: + mdiobus_free(bp->mii_bus); + return err; +} + +/* For Neptune board: LINK1000 as Link LED and TX as activity LED */ +int dnet_phy_marvell_fixup(struct phy_device *phydev) +{ + return phy_write(phydev, 0x18, 0x4148); +} + +static void dnet_update_stats(struct dnet *bp) +{ + u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; + u32 *p = &bp->hw_stats.rx_pkt_ignr; + u32 *end = &bp->hw_stats.rx_byte + 1; + + WARN_ON((unsigned long)(end - p - 1) != + (DNET_RX_BYTE_CNT - 
DNET_RX_PKT_IGNR_CNT) / 4); + + for (; p < end; p++, reg++) + *p += readl(reg); + + reg = bp->regs + DNET_TX_UNICAST_CNT; + p = &bp->hw_stats.tx_unicast; + end = &bp->hw_stats.tx_byte + 1; + + WARN_ON((unsigned long)(end - p - 1) != + (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); + + for (; p < end; p++, reg++) + *p += readl(reg); +} + +static int dnet_poll(struct napi_struct *napi, int budget) +{ + struct dnet *bp = container_of(napi, struct dnet, napi); + struct net_device *dev = bp->dev; + int npackets = 0; + unsigned int pkt_len; + struct sk_buff *skb; + unsigned int *data_ptr; + u32 int_enable; + u32 cmd_word; + int i; + + while (npackets < budget) { + /* + * break out of while loop if there are no more + * packets waiting + */ + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { + netif_rx_complete(napi); + int_enable = dnet_readl(bp, INTR_ENB); + int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; + dnet_writel(bp, int_enable, INTR_ENB); + return 0; + } + + cmd_word = dnet_readl(bp, RX_LEN_FIFO); + pkt_len = cmd_word & 0xFFFF; + + if (cmd_word & 0xDF180000) + printk(KERN_ERR "%s packet receive error %x\n", + __func__, cmd_word); + + skb = dev_alloc_skb(pkt_len + 5); + if (skb != NULL) { + /* Align IP on 16 byte boundaries */ + skb_reserve(skb, 2); + /* + * 'skb_put()' points to the start of sk_buff + * data area. + */ + data_ptr = (unsigned int *)skb_put(skb, pkt_len); + for (i = 0; i < (pkt_len + 3) >> 2; i++) + *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + npackets++; + } else + printk(KERN_NOTICE + "%s: No memory to allocate a sk_buff of " + "size %u.\n", dev->name, pkt_len); + } + + budget -= npackets; + + if (npackets < budget) { + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts */ + netif_rx_complete(napi); + int_enable = dnet_readl(bp, INTR_ENB); + int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; + dnet_writel(bp, int_enable, INTR_ENB); + return 0; + } + + /* There are still packets waiting */ + return 1; +} + +static irqreturn_t dnet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct dnet *bp = netdev_priv(dev); + u32 int_src, int_enable, int_current; + unsigned long flags; + unsigned int handled = 0; + + spin_lock_irqsave(&bp->lock, flags); + + /* read and clear the DNET irq (clear on read) */ + int_src = dnet_readl(bp, INTR_SRC); + int_enable = dnet_readl(bp, INTR_ENB); + int_current = int_src & int_enable; + + /* restart the queue if we had stopped it for TX fifo almost full */ + if (int_current & DNET_INTR_SRC_TX_FIFOAE) { + int_enable = dnet_readl(bp, INTR_ENB); + int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; + dnet_writel(bp, int_enable, INTR_ENB); + netif_wake_queue(dev); + handled = 1; + } + + /* RX FIFO error checking */ + if (int_current & + (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { + printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__, + dnet_readl(bp, RX_STATUS), int_current); + /* we can only flush the RX FIFOs */ + dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); + ndelay(500); + dnet_writel(bp, 0, SYS_CTL); + handled = 1; + } + + /* TX FIFO error checking */ + if (int_current & + (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { + printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, + dnet_readl(bp, TX_STATUS), int_current); + /* we can only flush the TX FIFOs */ + dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); + ndelay(500); + dnet_writel(bp, 0, SYS_CTL); + handled = 1; + } + + if 
(int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { + if (netif_rx_schedule_prep(&bp->napi)) { + /* + * There's no point taking any more interrupts + * until we have processed the buffers + */ + /* Disable Rx interrupts and schedule NAPI poll */ + int_enable = dnet_readl(bp, INTR_ENB); + int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; + dnet_writel(bp, int_enable, INTR_ENB); + __netif_rx_schedule(&bp->napi); + } + handled = 1; + } + + if (!handled) + pr_debug("%s: irq %x remains\n", __func__, int_current); + + spin_unlock_irqrestore(&bp->lock, flags); + + return IRQ_RETVAL(handled); +} + +#ifdef DEBUG +static inline void dnet_print_skb(struct sk_buff *skb) +{ + int k; + printk(KERN_DEBUG PFX "data:"); + for (k = 0; k < skb->len; k++) + printk(" %02x", (unsigned int)skb->data[k]); + printk("\n"); +} +#else +#define dnet_print_skb(skb) do {} while (0) +#endif + +static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + + struct dnet *bp = netdev_priv(dev); + u32 tx_status, irq_enable; + unsigned int len, i, tx_cmd, wrsz; + unsigned long flags; + unsigned int *bufp; + + tx_status = dnet_readl(bp, TX_STATUS); + + pr_debug("start_xmit: len %u head %p data %p tail %p end %p\n", + skb->len, skb->head, skb->data, skb->tail, skb->end); + dnet_print_skb(skb); + + /* frame size (words) */ + len = (skb->len + 3) >> 2; + + spin_lock_irqsave(&bp->lock, flags); + + tx_status = dnet_readl(bp, TX_STATUS); + + bufp = (unsigned int *)(((u32) skb->data) & 0xFFFFFFFC); + wrsz = (u32) skb->len + 3; + wrsz += ((u32) skb->data) & 0x3; + wrsz >>= 2; + tx_cmd = ((((unsigned int)(skb->data)) & 0x03) << 16) | (u32) skb->len; + + /* check if there is enough room for the current frame */ + if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { + for (i = 0; i < wrsz; i++) + dnet_writel(bp, *bufp++, TX_DATA_FIFO); + + /* + * inform MAC that a packet's written and ready to be + * shipped out + */ + dnet_writel(bp, tx_cmd, TX_LEN_FIFO); + } + + if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { + netif_stop_queue(dev); + tx_status = dnet_readl(bp, INTR_SRC); + irq_enable = dnet_readl(bp, INTR_ENB); + irq_enable |= DNET_INTR_ENB_TX_FIFOAE; + dnet_writel(bp, irq_enable, INTR_ENB); + } + + /* free the buffer */ + dev_kfree_skb(skb); + + spin_unlock_irqrestore(&bp->lock, flags); + + dev->trans_start = jiffies; + + return 0; +} + +static void dnet_reset_hw(struct dnet *bp) +{ + /* put ts_mac in IDLE state i.e. 
disable rx/tx */ + dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); + + /* + * RX FIFO almost full threshold: only cmd FIFO almost full is + * implemented for RX side + */ + dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); + /* + * TX FIFO almost empty threshold: only data FIFO almost empty + * is implemented for TX side + */ + dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); + + /* flush rx/tx fifos */ + dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, + SYS_CTL); + msleep(1); + dnet_writel(bp, 0, SYS_CTL); +} + +static void dnet_init_hw(struct dnet *bp) +{ + u32 config; + + dnet_reset_hw(bp); + __dnet_set_hwaddr(bp); + + config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); + + if (bp->dev->flags & IFF_PROMISC) + /* Copy All Frames */ + config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; + if (!(bp->dev->flags & IFF_BROADCAST)) + /* No BroadCast */ + config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; + + config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | + DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | + DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | + DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; + + dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); + + /* clear irq before enabling them */ + config = dnet_readl(bp, INTR_SRC); + + /* enable RX/TX interrupt, recv packet ready interrupt */ + dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | + DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | + DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | + DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | + DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); +} + +static int dnet_open(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + + /* if the phy is not yet register, retry later */ + if (!bp->phy_dev) + return -EAGAIN; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EADDRNOTAVAIL; + + napi_enable(&bp->napi); + dnet_init_hw(bp); + + phy_start_aneg(bp->phy_dev); + + /* schedule a link state check */ + phy_start(bp->phy_dev); + + netif_start_queue(dev); + + return 0; +} + +static int dnet_close(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&bp->napi); + + if (bp->phy_dev) + phy_stop(bp->phy_dev); + + dnet_reset_hw(bp); + netif_carrier_off(dev); + + return 0; +} + +static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) +{ + pr_debug("%s\n", __func__); + pr_debug("----------------------------- RX statistics " + "-------------------------------\n"); + pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); + pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); + pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); + pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); + pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); + pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); + pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); + pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); + pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); + pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); + pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); + pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); + pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); + pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); + pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); + pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); + pr_debug("----------------------------- TX statistics " + 
"-------------------------------\n"); + pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); + pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); + pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); + pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); + pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); + pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); + pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); + pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); +} + +static struct net_device_stats *dnet_get_stats(struct net_device *dev) +{ + + struct dnet *bp = netdev_priv(dev); + struct net_device_stats *nstat = &dev->stats; + struct dnet_stats *hwstat = &bp->hw_stats; + + /* read stats from hardware */ + dnet_update_stats(bp); + + /* Convert HW stats into netdevice stats */ + nstat->rx_errors = (hwstat->rx_len_chk_err + + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + + /* ignore IGP violation error + hwstat->rx_ipg_viol + */ + hwstat->rx_crc_err + + hwstat->rx_pre_shrink + + hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); + nstat->tx_errors = hwstat->tx_bad_fcs; + nstat->rx_length_errors = (hwstat->rx_len_chk_err + + hwstat->rx_lng_frm + + hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); + nstat->rx_crc_errors = hwstat->rx_crc_err; + nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; + nstat->rx_packets = hwstat->rx_ok_pkt; + nstat->tx_packets = (hwstat->tx_unicast + + hwstat->tx_multicast + hwstat->tx_brdcast); + nstat->rx_bytes = hwstat->rx_byte; + nstat->tx_bytes = hwstat->tx_byte; + nstat->multicast = hwstat->rx_multicast; + nstat->rx_missed_errors = hwstat->rx_pkt_ignr; + + dnet_print_pretty_hwstats(hwstat); + + return nstat; +} + +static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, if_mii(rq), cmd); +} + +static void dnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, "0"); +} + +static const struct ethtool_ops dnet_ethtool_ops = { + .get_settings = dnet_get_settings, + .set_settings = dnet_set_settings, + .get_drvinfo = dnet_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops dnet_netdev_ops = { + .ndo_open = dnet_open, + .ndo_stop = dnet_close, + .ndo_get_stats = dnet_get_stats, + .ndo_start_xmit = dnet_start_xmit, + .ndo_do_ioctl = dnet_ioctl, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __devinit dnet_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *dev; + struct dnet *bp; + struct phy_device *phydev; + int err = -ENXIO; + unsigned int mem_base, mem_size, irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no mmio 
resource defined\n"); + goto err_out; + } + mem_base = res->start; + mem_size = resource_size(res); + irq = platform_get_irq(pdev, 0); + + if (!request_mem_region(mem_base, mem_size, DRV_NAME)) { + dev_err(&pdev->dev, "no memory region available\n"); + err = -EBUSY; + goto err_out; + } + + err = -ENOMEM; + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) { + dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); + goto err_out; + } + + /* TODO: Actually, we have some interesting features... */ + dev->features |= 0; + + bp = netdev_priv(dev); + bp->dev = dev; + + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&bp->lock); + + bp->regs = ioremap(mem_base, mem_size); + if (!bp->regs) { + dev_err(&pdev->dev, "failed to map registers, aborting.\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + dev->irq = irq; + err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); + if (err) { + dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", + irq, err); + goto err_out_iounmap; + } + + dev->netdev_ops = &dnet_netdev_ops; + netif_napi_add(dev, &bp->napi, dnet_poll, 64); + dev->ethtool_ops = &dnet_ethtool_ops; + + dev->base_addr = (unsigned long)bp->regs; + + bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; + + dnet_get_hwaddr(bp); + + if (!is_valid_ether_addr(dev->dev_addr)) { + /* choose a random ethernet address */ + random_ether_addr(dev->dev_addr); + __dnet_set_hwaddr(bp); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_free_irq; + } + + /* register the PHY board fixup (for Marvell 88E1111) */ + err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, + dnet_phy_marvell_fixup); + /* we can live without it, so just issue a warning */ + if (err) + dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); + + if (dnet_mii_init(bp) != 0) + goto err_out_unregister_netdev; + + dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", + bp->regs, mem_base, dev->irq, dev->dev_addr); + dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n", + (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", + (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", + (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", + (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); + phydev = bp->phy_dev; + dev_info(&pdev->dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, phydev->dev.bus_id, phydev->irq); + + return 0; + +err_out_unregister_netdev: + unregister_netdev(dev); +err_out_free_irq: + free_irq(dev->irq, dev); +err_out_iounmap: + iounmap(bp->regs); +err_out_free_dev: + free_netdev(dev); +err_out: + return err; +} + +static int __devexit dnet_remove(struct platform_device *pdev) +{ + + struct net_device *dev; + struct dnet *bp; + + dev = platform_get_drvdata(pdev); + + if (dev) { + bp = netdev_priv(dev); + if (bp->phy_dev) + phy_disconnect(bp->phy_dev); + mdiobus_unregister(bp->mii_bus); + kfree(bp->mii_bus->irq); + mdiobus_free(bp->mii_bus); + unregister_netdev(dev); + free_irq(dev->irq, dev); + iounmap(bp->regs); + free_netdev(dev); + } + + return 0; +} + +static struct platform_driver dnet_driver = { + .probe = dnet_probe, + .remove = __devexit_p(dnet_remove), + .driver = { + .name = "dnet", + }, +}; + +static int __init dnet_init(void) +{ + return platform_driver_register(&dnet_driver); +} + +static void __exit dnet_exit(void) +{ + platform_driver_unregister(&dnet_driver); +} + +module_init(dnet_init); +module_exit(dnet_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Dave DNET Ethernet driver"); +MODULE_AUTHOR("Ilya Yanok , " + "Matteo Vit "); diff --git a/drivers/net/dnet.h b/drivers/net/dnet.h new file mode 100644 index 00000000000..37f5b30fa78 --- /dev/null +++ b/drivers/net/dnet.h @@ -0,0 +1,225 @@ +/* + * Dave DNET Ethernet Controller driver + * + * Copyright (C) 2008 Dave S.r.l. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _DNET_H +#define _DNET_H + +#define DRV_NAME "dnet" +#define DRV_VERSION "0.9.1" +#define PFX DRV_NAME ": " + +/* Register access macros */ +#define dnet_writel(port, value, reg) \ + writel((value), (port)->regs + DNET_##reg) +#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg) + +/* ALL DNET FIFO REGISTERS */ +#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */ +#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */ +#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */ +#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */ + +/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */ +#define DNET_VERCAPS 0x100 /* VERCAPS */ +#define DNET_INTR_SRC 0x104 /* INTR_SRC */ +#define DNET_INTR_ENB 0x108 /* INTR_ENB */ +#define DNET_RX_STATUS 0x10C /* RX_STATUS */ +#define DNET_TX_STATUS 0x110 /* TX_STATUS */ +#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */ +#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */ +#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */ +#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */ +#define DNET_SYS_CTL 0x124 /* SYS_CTL */ +#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */ +#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */ +#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */ + +/* ALL DNET MAC REGISTERS */ +#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */ +#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */ + +/* ALL DNET RX STATISTICS COUNTERS */ +#define DNET_RX_PKT_IGNR_CNT 0x300 +#define DNET_RX_LEN_CHK_ERR_CNT 0x304 +#define DNET_RX_LNG_FRM_CNT 0x308 +#define DNET_RX_SHRT_FRM_CNT 0x30C +#define DNET_RX_IPG_VIOL_CNT 0x310 +#define DNET_RX_CRC_ERR_CNT 0x314 +#define DNET_RX_OK_PKT_CNT 0x318 +#define DNET_RX_CTL_FRM_CNT 0x31C +#define DNET_RX_PAUSE_FRM_CNT 0x320 +#define DNET_RX_MULTICAST_CNT 0x324 +#define DNET_RX_BROADCAST_CNT 0x328 +#define DNET_RX_VLAN_TAG_CNT 0x32C +#define DNET_RX_PRE_SHRINK_CNT 0x330 +#define DNET_RX_DRIB_NIB_CNT 0x334 +#define DNET_RX_UNSUP_OPCD_CNT 0x338 +#define DNET_RX_BYTE_CNT 0x33C + +/* DNET TX STATISTICS COUNTERS */ +#define DNET_TX_UNICAST_CNT 0x400 +#define DNET_TX_PAUSE_FRM_CNT 0x404 +#define DNET_TX_MULTICAST_CNT 0x408 +#define DNET_TX_BRDCAST_CNT 0x40C +#define DNET_TX_VLAN_TAG_CNT 0x410 +#define DNET_TX_BAD_FCS_CNT 0x414 +#define DNET_TX_JUMBO_CNT 0x418 +#define DNET_TX_BYTE_CNT 0x41C + +/* SOME INTERNAL MAC-CORE REGISTER */ +#define DNET_INTERNAL_MODE_REG 0x0 +#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2 +#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4 +#define DNET_INTERNAL_IGP_REG 0x8 +#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa +#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc +#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe +#define DNET_INTERNAL_TX_RX_STS_REG 0x12 +#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14 +#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16 + +#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14) + +#define DNET_INTERNAL_WRITE (1 << 31) + +/* MAC-CORE REGISTER FIELDS */ + +/* MAC-CORE MODE REGISTER FIELDS */ +#define DNET_INTERNAL_MODE_GBITEN (1 << 0) +#define DNET_INTERNAL_MODE_FCEN (1 << 1) +#define DNET_INTERNAL_MODE_RXEN (1 << 2) +#define DNET_INTERNAL_MODE_TXEN (1 << 3) + +/* MAC-CORE RXTX CONTROL REGISTER FIELDS */ +#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8) +#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7) +#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4) +#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3) +#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2) +#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1) +#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0) +#define 
DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6) +#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5) + +/* SYSTEM CONTROL REGISTER FIELDS */ +#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0) +#define DNET_SYS_CTL_SENDPAUSE (1 << 2) +#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3) +#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4) + +/* TX STATUS REGISTER FIELDS */ +#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2) +#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1) + +/* INTERRUPT SOURCE REGISTER FIELDS */ +#define DNET_INTR_SRC_TX_PKTSENT (1 << 0) +#define DNET_INTR_SRC_TX_FIFOAF (1 << 1) +#define DNET_INTR_SRC_TX_FIFOAE (1 << 2) +#define DNET_INTR_SRC_TX_DISCFRM (1 << 3) +#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4) +#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8) +#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9) +#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10) +#define DNET_INTR_SRC_TX_SUMMARY (1 << 16) +#define DNET_INTR_SRC_RX_SUMMARY (1 << 17) +#define DNET_INTR_SRC_PHY (1 << 19) + +/* INTERRUPT ENABLE REGISTER FIELDS */ +#define DNET_INTR_ENB_TX_PKTSENT (1 << 0) +#define DNET_INTR_ENB_TX_FIFOAF (1 << 1) +#define DNET_INTR_ENB_TX_FIFOAE (1 << 2) +#define DNET_INTR_ENB_TX_DISCFRM (1 << 3) +#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4) +#define DNET_INTR_ENB_RX_PKTRDY (1 << 8) +#define DNET_INTR_ENB_RX_FIFOAF (1 << 9) +#define DNET_INTR_ENB_RX_FIFOERR (1 << 10) +#define DNET_INTR_ENB_RX_ERROR (1 << 11) +#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12) +#define DNET_INTR_ENB_RX_FIFOAE (1 << 13) +#define DNET_INTR_ENB_TX_SUMMARY (1 << 16) +#define DNET_INTR_ENB_RX_SUMMARY (1 << 17) +#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18) + +/* default values: + * almost empty = less than one full sized ethernet frame (no jumbo) inside + * the fifo almost full = can write less than one full sized ethernet frame + * (no jumbo) inside the fifo + */ +#define DNET_CFG_TX_FIFO_FULL_THRES 25 +#define DNET_CFG_RX_FIFO_FULL_THRES 20 + +/* + * Capabilities. Used by the driver to know the capabilities that the ethernet + * controller inside the FPGA have. + */ + +#define DNET_HAS_MDIO (1 << 0) +#define DNET_HAS_IRQ (1 << 1) +#define DNET_HAS_GIGABIT (1 << 2) +#define DNET_HAS_DMA (1 << 3) + +#define DNET_HAS_MII (1 << 4) /* or GMII */ +#define DNET_HAS_RMII (1 << 5) /* or RGMII */ + +#define DNET_CAPS_MASK 0xFFFF + +#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */ +#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */ +#define DNET_FIFO_TX_DATA_AE_TH 384 + +#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */ + +/* + * Hardware-collected statistics. 
+ */
+struct dnet_stats {
+	u32 rx_pkt_ignr;
+	u32 rx_len_chk_err;
+	u32 rx_lng_frm;
+	u32 rx_shrt_frm;
+	u32 rx_ipg_viol;
+	u32 rx_crc_err;
+	u32 rx_ok_pkt;
+	u32 rx_ctl_frm;
+	u32 rx_pause_frm;
+	u32 rx_multicast;
+	u32 rx_broadcast;
+	u32 rx_vlan_tag;
+	u32 rx_pre_shrink;
+	u32 rx_drib_nib;
+	u32 rx_unsup_opcd;
+	u32 rx_byte;
+	u32 tx_unicast;
+	u32 tx_pause_frm;
+	u32 tx_multicast;
+	u32 tx_brdcast;
+	u32 tx_vlan_tag;
+	u32 tx_bad_fcs;
+	u32 tx_jumbo;
+	u32 tx_byte;
+};
+
+struct dnet {
+	void __iomem *regs;
+	spinlock_t lock;
+	struct platform_device *pdev;
+	struct net_device *dev;
+	struct dnet_stats hw_stats;
+	unsigned int capabilities; /* read from FPGA */
+	struct napi_struct napi;
+
+	/* PHY stuff */
+	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
+	unsigned int link;
+	unsigned int speed;
+	unsigned int duplex;
+};
+
+#endif /* _DNET_H */
-- cgit v1.2.3-70-g09d2

From 2c5849ea38fdad477d72dcf1c8c4842db4b33aae Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 11 Mar 2009 23:28:57 -0700
Subject: dnet: Fix warnings on 64-bit.

Signed-off-by: David S. Miller
---
 drivers/net/dnet.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 92c3bd3a472..4b96974fe76 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -553,8 +553,8 @@ static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	tx_status = dnet_readl(bp, TX_STATUS);

-	pr_debug("start_xmit: len %u head %p data %p tail %p end %p\n",
-		 skb->len, skb->head, skb->data, skb->tail, skb->end);
+	pr_debug("start_xmit: len %u head %p data %p\n",
+		 skb->len, skb->head, skb->data);
 	dnet_print_skb(skb);

 	/* frame size (words) */
@@ -564,11 +564,11 @@ static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	tx_status = dnet_readl(bp, TX_STATUS);

-	bufp = (unsigned int *)(((u32) skb->data) & 0xFFFFFFFC);
+	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
 	wrsz = (u32) skb->len + 3;
-	wrsz += ((u32) skb->data) & 0x3;
+	wrsz += ((unsigned long) skb->data) & 0x3;
 	wrsz >>= 2;
-	tx_cmd = ((((unsigned int)(skb->data)) & 0x03) << 16) | (u32) skb->len;
+	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

 	/* check if there is enough room for the current frame */
 	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
-- cgit v1.2.3-70-g09d2
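The 64-bit fix above is the standard LP64 pattern: on a 64-bit kernel a
pointer is 64 bits wide, so casting skb->data to u32 both truncates the
address and provokes pointer-to-integer cast warnings, whereas unsigned
long is always pointer-sized in the kernel. A user-space sketch of the
corrected alignment arithmetic (illustrative only, not the driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[16];
	char *data = buf + 5;	/* stand-in for skb->data */

	/* (u32)data would truncate the upper 32 address bits on LP64.
	 * Pointer-sized arithmetic keeps the address intact: */
	unsigned int *bufp = (unsigned int *)((unsigned long)data & ~0x3UL);
	unsigned int offset = (unsigned long)data & 0x3;

	printf("data=%p aligned=%p offset=%u\n",
	       (void *)data, (void *)bufp, offset);
	return 0;
}

The same reasoning applies to wrsz and tx_cmd in the hunk above: the
intermediate arithmetic happens in unsigned long, and only the final,
provably small value is narrowed to u32.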
From 6b7c5b947c671a96e39f9526a5fd70c178b8dfd1 Mon Sep 17 00:00:00 2001
From: Sathya Perla
Date: Wed, 11 Mar 2009 23:32:03 -0700
Subject: net: Add be2net driver.

Signed-off-by: Sathya Perla
Signed-off-by: David S. Miller
---
 MAINTAINERS                    |    9 +
 drivers/net/Kconfig            |    2 +
 drivers/net/Makefile           |    1 +
 drivers/net/benet/Kconfig      |    7 +
 drivers/net/benet/Makefile     |    7 +
 drivers/net/benet/be.h         |  327 +++++
 drivers/net/benet/be_cmds.c    |  861 ++++++++++++++
 drivers/net/benet/be_cmds.h    |  688 +++++++++++
 drivers/net/benet/be_ethtool.c |  362 ++++++
 drivers/net/benet/be_hw.h      |  211 +++++
 drivers/net/benet/be_main.c    | 1903 ++++++++++++++++++++++++++++++++++
 11 files changed, 4378 insertions(+)
 create mode 100644 drivers/net/benet/Kconfig
 create mode 100644 drivers/net/benet/Makefile
 create mode 100644 drivers/net/benet/be.h
 create mode 100644 drivers/net/benet/be_cmds.c
 create mode 100644 drivers/net/benet/be_cmds.h
 create mode 100644 drivers/net/benet/be_ethtool.c
 create mode 100644 drivers/net/benet/be_hw.h
 create mode 100644 drivers/net/benet/be_main.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 1c2ca1dc66f..43934437511 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3880,6 +3880,15 @@ L: linux-ide@vger.kernel.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
 S:	Supported

+SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
+P:	Sathya Perla
+M:	sathyap@serverengines.com
+P:	Subbu Seetharaman
+M:	subbus@serverengines.com
+L:	netdev@vger.kernel.org
+W:	http://www.serverengines.com
+S:	Supported
+
 SFC NETWORK DRIVER
 P:	Steve Hodgson
 P:	Ben Hutchings
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5c28b06dac6..435e2e3a82c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2630,6 +2630,8 @@ config QLGE

 source "drivers/net/sfc/Kconfig"

+source "drivers/net/benet/Kconfig"
+
 endif # NETDEV_10000

 source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6d9bba58b9a..471baaff229 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
 obj-$(CONFIG_ENIC) += enic/
 obj-$(CONFIG_JME) += jme.o
+obj-$(CONFIG_BE2NET) += benet/

 gianfar_driver-objs := gianfar.o \
 	gianfar_ethtool.o \
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
new file mode 100644
index 00000000000..c6934f179c0
--- /dev/null
+++ b/drivers/net/benet/Kconfig
@@ -0,0 +1,7 @@
+config BE2NET
+	tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
+	depends on PCI && INET
+	select INET_LRO
+	help
+	  This driver implements the NIC functionality for ServerEngines'
+	  10Gbps network adapter - BladeEngine 2.
diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
new file mode 100644
index 00000000000..a60cd805113
--- /dev/null
+++ b/drivers/net/benet/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile to build the network driver for ServerEngine's BladeEngine.
+#
+
+obj-$(CONFIG_BE2NET) += be2net.o
+
+be2net-y := be_main.o be_cmds.o be_ethtool.o
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
new file mode 100644
index 00000000000..63d593d5315
--- /dev/null
+++ b/drivers/net/benet/be.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2005 - 2009 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N.
Fair Oaks Ave + * Sunnyvale, CA 94085 + */ + +#ifndef BE_H +#define BE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "be_hw.h" + +#define DRV_VER "2.0.348" +#define DRV_NAME "be2net" +#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" +#define DRV_DESC BE_NAME "Driver" + +/* Number of bytes of an RX frame that are copied to skb->data */ +#define BE_HDR_LEN 64 +#define BE_MAX_JUMBO_FRAME_SIZE 9018 +#define BE_MIN_MTU 256 + +#define BE_NUM_VLANS_SUPPORTED 64 +#define BE_MAX_EQD 96 +#define BE_MAX_TX_FRAG_COUNT 30 + +#define EVNT_Q_LEN 1024 +#define TX_Q_LEN 2048 +#define TX_CQ_LEN 1024 +#define RX_Q_LEN 1024 /* Does not support any other value */ +#define RX_CQ_LEN 1024 +#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */ +#define MCC_CQ_LEN 256 + +#define BE_NAPI_WEIGHT 64 +#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ +#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) + +#define BE_MAX_LRO_DESCRIPTORS 16 +#define BE_MAX_FRAGS_PER_FRAME 16 + +struct be_dma_mem { + void *va; + dma_addr_t dma; + u32 size; +}; + +struct be_queue_info { + struct be_dma_mem dma_mem; + u16 len; + u16 entry_size; /* Size of an element in the queue */ + u16 id; + u16 tail, head; + bool created; + atomic_t used; /* Number of valid elements in the queue */ +}; + +struct be_ctrl_info { + u8 __iomem *csr; + u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* PCI config space */ + int pci_func; + + /* Mbox used for cmd request/response */ + spinlock_t cmd_lock; /* For serializing cmds to BE card */ + struct be_dma_mem mbox_mem; + /* Mbox mem is adjusted to align to 16 bytes. The allocated addr + * is stored for freeing purpose */ + struct be_dma_mem mbox_mem_alloced; +}; + +#include "be_cmds.h" + +struct be_drvr_stats { + u32 be_tx_reqs; /* number of TX requests initiated */ + u32 be_tx_stops; /* number of times TX Q was stopped */ + u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */ + u32 be_tx_wrbs; /* number of tx WRBs used */ + u32 be_tx_events; /* number of tx completion events */ + u32 be_tx_compl; /* number of tx completion entries processed */ + u64 be_tx_jiffies; + ulong be_tx_bytes; + ulong be_tx_bytes_prev; + u32 be_tx_rate; + + u32 cache_barrier[16]; + + u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */ + u32 be_polls; /* number of times NAPI called poll function */ + u32 be_rx_events; /* number of ucast rx completion events */ + u32 be_rx_compl; /* number of rx completion entries processed */ + u32 be_lro_hgram_data[8]; /* histogram of LRO data packets */ + u32 be_lro_hgram_ack[8]; /* histogram of LRO ACKs */ + u64 be_rx_jiffies; + ulong be_rx_bytes; + ulong be_rx_bytes_prev; + u32 be_rx_rate; + /* number of non ether type II frames dropped where + * frame len > length field of Mac Hdr */ + u32 be_802_3_dropped_frames; + /* number of non ether type II frames malformed where + * in frame len < length field of Mac Hdr */ + u32 be_802_3_malformed_frames; + u32 be_rxcp_err; /* Num rx completion entries w/ err set. 
*/ + ulong rx_fps_jiffies; /* jiffies at last FPS calc */ + u32 be_rx_frags; + u32 be_prev_rx_frags; + u32 be_rx_fps; /* Rx frags per second */ +}; + +struct be_stats_obj { + struct be_drvr_stats drvr_stats; + struct net_device_stats net_stats; + struct be_dma_mem cmd; +}; + +struct be_eq_obj { + struct be_queue_info q; + char desc[32]; + + /* Adaptive interrupt coalescing (AIC) info */ + bool enable_aic; + u16 min_eqd; /* in usecs */ + u16 max_eqd; /* in usecs */ + u16 cur_eqd; /* in usecs */ + + struct napi_struct napi; +}; + +struct be_tx_obj { + struct be_queue_info q; + struct be_queue_info cq; + /* Remember the skbs that were transmitted */ + struct sk_buff *sent_skb_list[TX_Q_LEN]; +}; + +/* Struct to remember the pages posted for rx frags */ +struct be_rx_page_info { + struct page *page; + dma_addr_t bus; + u16 page_offset; + bool last_page_user; +}; + +struct be_rx_obj { + struct be_queue_info q; + struct be_queue_info cq; + struct be_rx_page_info page_info_tbl[RX_Q_LEN]; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS]; +}; + +#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ +struct be_adapter { + struct pci_dev *pdev; + struct net_device *netdev; + + /* Mbox, pci config, csr address information */ + struct be_ctrl_info ctrl; + + struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS]; + bool msix_enabled; + bool isr_registered; + + /* TX Rings */ + struct be_eq_obj tx_eq; + struct be_tx_obj tx_obj; + + u32 cache_line_break[8]; + + /* Rx rings */ + struct be_eq_obj rx_eq; + struct be_rx_obj rx_obj; + u32 big_page_size; /* Compounded page size shared by rx wrbs */ + + struct vlan_group *vlan_grp; + u16 num_vlans; + u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; + + struct be_stats_obj stats; + /* Work queue used to perform periodic tasks like getting statistics */ + struct delayed_work work; + + /* Ethtool knobs and info */ + bool rx_csum; /* BE card must perform rx-checksumming */ + u32 max_rx_coal; + char fw_ver[FW_VER_LEN]; + u32 if_handle; /* Used to configure filtering */ + u32 pmac_id; /* MAC addr handle used by BE card */ + + struct be_link_info link; + u32 port_num; +}; + +extern struct ethtool_ops be_ethtool_ops; + +#define drvr_stats(adapter) (&adapter->stats.drvr_stats) + +#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) + +static inline u32 MODULO(u16 val, u16 limit) +{ + BUG_ON(limit & (limit - 1)); + return val & (limit - 1); +} + +static inline void index_adv(u16 *index, u16 val, u16 limit) +{ + *index = MODULO((*index + val), limit); +} + +static inline void index_inc(u16 *index, u16 limit) +{ + *index = MODULO((*index + 1), limit); +} + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) + +/* Returns number of pages spanned by the data starting at the given addr */ +#define PAGES_4K_SPANNED(_address, size) \ + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) + +/* Byte offset into the page corresponding to given address */ +#define OFFSET_IN_PAGE(addr) \ + ((size_t)(addr) & (PAGE_SIZE_4K-1)) + +/* Returns bit offset within a DWORD of a bitfield */ +#define AMAP_BIT_OFFSET(_struct, field) \ + (((size_t)&(((_struct *)0)->field))%32) + +/* Returns the bit mask of the field that is NOT shifted into location. */ +static inline u32 amap_mask(u32 bitsize) +{ + return (bitsize == 32 ? 
0xFFFFFFFF : (1 << bitsize) - 1); +} + +static inline void +amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value) +{ + u32 *dw = (u32 *) ptr + dw_offset; + *dw &= ~(mask << offset); + *dw |= (mask & value) << offset; +} + +#define AMAP_SET_BITS(_struct, field, ptr, val) \ + amap_set(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field), \ + val) + +static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) +{ + u32 *dw = (u32 *) ptr; + return mask & (*(dw + dw_offset) >> offset); +} + +#define AMAP_GET_BITS(_struct, field, ptr) \ + amap_get(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field)) + +#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) +#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) +static inline void swap_dws(void *wrb, int len) +{ +#ifdef __BIG_ENDIAN + u32 *dw = wrb; + BUG_ON(len % 4); + do { + *dw = cpu_to_le32(*dw); + dw++; + len -= 4; + } while (len); +#endif /* __BIG_ENDIAN */ +} + +static inline u8 is_tcp_pkt(struct sk_buff *skb) +{ + u8 val = 0; + + if (ip_hdr(skb)->version == 4) + val = (ip_hdr(skb)->protocol == IPPROTO_TCP); + else if (ip_hdr(skb)->version == 6) + val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP); + + return val; +} + +static inline u8 is_udp_pkt(struct sk_buff *skb) +{ + u8 val = 0; + + if (ip_hdr(skb)->version == 4) + val = (ip_hdr(skb)->protocol == IPPROTO_UDP); + else if (ip_hdr(skb)->version == 6) + val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP); + + return val; +} + +#endif /* BE_H */ diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c new file mode 100644 index 00000000000..d444aed962b --- /dev/null +++ b/drivers/net/benet/be_cmds.c @@ -0,0 +1,861 @@ +/* + * Copyright (C) 2005 - 2009 ServerEngines + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@serverengines.com + * + * ServerEngines + * 209 N. 
Fair Oaks Ave + * Sunnyvale, CA 94085 + */ + +#include "be.h" + +static int be_mbox_db_ready_wait(void __iomem *db) +{ + int cnt = 0, wait = 5; + u32 ready; + + do { + ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; + if (ready) + break; + + if (cnt > 200000) { + printk(KERN_WARNING DRV_NAME + ": mbox_db poll timed out\n"); + return -1; + } + + if (cnt > 50) + wait = 200; + cnt += wait; + udelay(wait); + } while (true); + + return 0; +} + +/* + * Insert the mailbox address into the doorbell in two steps + */ +static int be_mbox_db_ring(struct be_ctrl_info *ctrl) +{ + int status; + u16 compl_status, extd_status; + u32 val = 0; + void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; + struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; + struct be_mcc_mailbox *mbox = mbox_mem->va; + struct be_mcc_cq_entry *cqe = &mbox->cqe; + + memset(cqe, 0, sizeof(*cqe)); + + val &= ~MPU_MAILBOX_DB_RDY_MASK; + val |= MPU_MAILBOX_DB_HI_MASK; + /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ + val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; + iowrite32(val, db); + + /* wait for ready to be set */ + status = be_mbox_db_ready_wait(db); + if (status != 0) + return status; + + val = 0; + val &= ~MPU_MAILBOX_DB_RDY_MASK; + val &= ~MPU_MAILBOX_DB_HI_MASK; + /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ + val |= (u32)(mbox_mem->dma >> 4) << 2; + iowrite32(val, db); + + status = be_mbox_db_ready_wait(db); + if (status != 0) + return status; + + /* compl entry has been made now */ + be_dws_le_to_cpu(cqe, sizeof(*cqe)); + if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) { + printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n"); + return -1; + } + + compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) & + CQE_STATUS_COMPL_MASK; + if (compl_status != MCC_STATUS_SUCCESS) { + extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) & + CQE_STATUS_EXTD_MASK; + printk(KERN_WARNING DRV_NAME + ": ERROR in cmd compl. 
status(compl/extd)=%d/%d\n", + compl_status, extd_status); + } + + return compl_status; +} + +static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) +{ + u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); + + *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; + if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) + return -1; + else + return 0; +} + +static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage) +{ + u16 stage, cnt, error; + for (cnt = 0; cnt < 5000; cnt++) { + error = be_POST_stage_get(ctrl, &stage); + if (error) + return -1; + + if (stage == poll_stage) + break; + udelay(1000); + } + if (stage != poll_stage) + return -1; + return 0; +} + + +int be_cmd_POST(struct be_ctrl_info *ctrl) +{ + u16 stage, error; + + error = be_POST_stage_get(ctrl, &stage); + if (error) + goto err; + + if (stage == POST_STAGE_ARMFW_RDY) + return 0; + + if (stage != POST_STAGE_AWAITING_HOST_RDY) + goto err; + + /* On awaiting host rdy, reset and again poll on awaiting host rdy */ + iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); + error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY); + if (error) + goto err; + + /* Now kickoff POST and poll on armfw ready */ + iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); + error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY); + if (error) + goto err; + + return 0; +err: + printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage); + return -1; +} + +static inline void *embedded_payload(struct be_mcc_wrb *wrb) +{ + return wrb->payload.embedded_payload; +} + +static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) +{ + return &wrb->payload.sgl[0]; +} + +/* Don't touch the hdr after it's prepared */ +static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, + bool embedded, u8 sge_cnt) +{ + if (embedded) + wrb->embedded |= MCC_WRB_EMBEDDED_MASK; + else + wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << + MCC_WRB_SGE_CNT_SHIFT; + wrb->payload_length = payload_len; + be_dws_cpu_to_le(wrb, 20); +} + +/* Don't touch the hdr after it's prepared */ +static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, + u8 subsystem, u8 opcode, int cmd_len) +{ + req_hdr->opcode = opcode; + req_hdr->subsystem = subsystem; + req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); +} + +static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, + struct be_dma_mem *mem) +{ + int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); + u64 dma = (u64)mem->dma; + + for (i = 0; i < buf_pages; i++) { + pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); + pages[i].hi = cpu_to_le32(upper_32_bits(dma)); + dma += PAGE_SIZE_4K; + } +} + +/* Converts interrupt delay in microseconds to multiplier value */ +static u32 eq_delay_to_mult(u32 usec_delay) +{ +#define MAX_INTR_RATE 651042 + const u32 round = 10; + u32 multiplier; + + if (usec_delay == 0) + multiplier = 0; + else { + u32 interrupt_rate = 1000000 / usec_delay; + /* Max delay, corresponding to the lowest interrupt rate */ + if (interrupt_rate == 0) + multiplier = 1023; + else { + multiplier = (MAX_INTR_RATE - interrupt_rate) * round; + multiplier /= interrupt_rate; + /* Round the multiplier to the closest value.*/ + multiplier = (multiplier + round/2) / round; + multiplier = min(multiplier, (u32)1023); + } + } + return multiplier; +} + +static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) +{ + return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 
+} + +int be_cmd_eq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *eq, int eq_delay) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_eq_create *req = embedded_payload(wrb); + struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); + struct be_dma_mem *q_mem = &eq->dma_mem; + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_EQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + AMAP_SET_BITS(struct amap_eq_context, func, req->context, + ctrl->pci_func); + AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); + /* 4byte eqe*/ + AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); + AMAP_SET_BITS(struct amap_eq_context, count, req->context, + __ilog2_u32(eq->len/256)); + AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, + eq_delay_to_mult(eq_delay)); + be_dws_cpu_to_le(req->context, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_db_ring(ctrl); + if (!status) { + eq->id = le16_to_cpu(resp->eq_id); + eq->created = true; + } + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, + u8 type, bool permanent, u32 if_handle) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_mac_query *req = embedded_payload(wrb); + struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req)); + + req->type = type; + if (permanent) { + req->permanent = 1; + } else { + req->if_id = cpu_to_le16((u16)if_handle); + req->permanent = 0; + } + + status = be_mbox_db_ring(ctrl); + if (!status) + memcpy(mac_addr, resp->mac.addr, ETH_ALEN); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, + u32 if_id, u32 *pmac_id) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_pmac_add *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); + + req->if_id = cpu_to_le32(if_id); + memcpy(req->mac_address, mac_addr, ETH_ALEN); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); + *pmac_id = le32_to_cpu(resp->pmac_id); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_pmac_del *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); + + req->if_id = cpu_to_le32(if_id); + req->pmac_id = cpu_to_le32(pmac_id); + + status = be_mbox_db_ring(ctrl); + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +int be_cmd_cq_create(struct be_ctrl_info *ctrl, + 
struct be_queue_info *cq, struct be_queue_info *eq, + bool sol_evts, bool no_delay, int coalesce_wm) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_cq_create *req = embedded_payload(wrb); + struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); + struct be_dma_mem *q_mem = &cq->dma_mem; + void *ctxt = &req->context; + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_CQ_CREATE, sizeof(*req)); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context, count, ctxt, + __ilog2_u32(cq->len/256)); + AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); + AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0); + AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func); + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_db_ring(ctrl); + if (!status) { + cq->id = le16_to_cpu(resp->cq_id); + cq->created = true; + } + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +int be_cmd_txq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *txq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb); + struct be_dma_mem *q_mem = &txq->dma_mem; + void *ctxt = &req->context; + int status; + u32 len_encoded; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, + sizeof(*req)); + + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->ulp_num = BE_ULP1_NUM; + req->type = BE_ETH_TX_RING_TYPE_STANDARD; + + len_encoded = fls(txq->len); /* log2(len) + 1 */ + if (len_encoded == 16) + len_encoded = 0; + AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded); + AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt, + ctrl->pci_func); + AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); + txq->id = le16_to_cpu(resp->cid); + txq->created = true; + } + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +int be_cmd_rxq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *rxq, u16 cq_id, u16 frag_size, + u16 max_frame_size, u32 if_id, u32 rss) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb); + struct be_dma_mem *q_mem = &rxq->dma_mem; + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, + sizeof(*req)); + + req->cq_id = cpu_to_le16(cq_id); + 
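/* the FW wants the rx fragment size encoded as a log2 value: with the driver's default rx_frag_size of 2048, fls(2048) - 1 yields 11 (descriptive note, not in the original source) */ +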
req->frag_size = fls(frag_size) - 1; + req->num_pages = 2; + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + req->interface_id = cpu_to_le32(if_id); + req->max_frame_size = cpu_to_le16(max_frame_size); + req->rss_queue = cpu_to_le32(rss); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); + rxq->id = le16_to_cpu(resp->id); + rxq->created = true; + } + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +/* Generic destroyer function for all types of queues */ +int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, + int queue_type) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_q_destroy *req = embedded_payload(wrb); + u8 subsys = 0, opcode = 0; + int status; + + spin_lock(&ctrl->cmd_lock); + + memset(wrb, 0, sizeof(*wrb)); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + switch (queue_type) { + case QTYPE_EQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_EQ_DESTROY; + break; + case QTYPE_CQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_CQ_DESTROY; + break; + case QTYPE_TXQ: + subsys = CMD_SUBSYSTEM_ETH; + opcode = OPCODE_ETH_TX_DESTROY; + break; + case QTYPE_RXQ: + subsys = CMD_SUBSYSTEM_ETH; + opcode = OPCODE_ETH_RX_DESTROY; + break; + default: + printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); + status = -1; + goto err; + } + be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); + req->id = cpu_to_le16(q->id); + + status = be_mbox_db_ring(ctrl); +err: + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +/* Create an rx filtering policy configuration on an i/f */ +int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac, + bool pmac_invalid, u32 *if_handle, u32 *pmac_id) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_if_create *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); + + req->capability_flags = cpu_to_le32(flags); + req->enable_flags = cpu_to_le32(flags); + if (!pmac_invalid) + memcpy(req->mac_addr, mac, ETH_ALEN); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_if_create *resp = embedded_payload(wrb); + *if_handle = le32_to_cpu(resp->interface_id); + if (!pmac_invalid) + *pmac_id = le32_to_cpu(resp->pmac_id); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_if_destroy *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); + + req->interface_id = cpu_to_le32(interface_id); + status = be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + + return status; +} + +/* Get stats is a non embedded command: the request is not embedded inside + * WRB but is a separate dma memory block + */ +int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_get_stats *req = nonemb_cmd->va; + struct be_sge *sge = nonembedded_sgl(wrb); + 
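/* the stats request/response cannot fit the 236-byte embedded payload, so they live in caller-provided DMA memory described by this scatter-gather entry (descriptive note, not in the original source) */ +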
int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + memset(req, 0, sizeof(*req)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_GET_STATISTICS, sizeof(*req)); + sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); + sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(nonemb_cmd->size); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_get_stats *resp = nonemb_cmd->va; + be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_link_status_query(struct be_ctrl_info *ctrl, + struct be_link_info *link) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_link_status *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_link_status *resp = embedded_payload(wrb); + link->speed = resp->mac_speed; + link->duplex = resp->mac_duplex; + link->fault = resp->mac_fault; + } else { + link->speed = PHY_LINK_SPEED_ZERO; + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FW_VERSION, sizeof(*req)); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); + strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +/* set the EQ delay interval of an EQ to specified value */ +int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + + req->num_eq = cpu_to_le32(1); + req->delay[0].eq_id = cpu_to_le32(eq_id); + req->delay[0].phase = 0; + req->delay[0].delay_multiplier = cpu_to_le32(eqd); + + status = be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array, + u32 num, bool untagged, bool promiscuous) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_vlan_config *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req)); + + req->interface_id = if_id; + req->promiscuous = promiscuous; + req->untagged = untagged; + req->num_vlan = num; + if (!promiscuous) { + memcpy(req->normal_vlan, vtag_array, + req->num_vlan * sizeof(vtag_array[0])); + } + + status = 
be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_PROMISCUOUS, sizeof(*req)); + + if (port_num) + req->port1_promiscuous = en; + else + req->port0_promiscuous = en; + + status = be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, + u32 num, bool promiscuous) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); + + req->interface_id = if_id; + req->promiscuous = promiscuous; + if (!promiscuous) { + req->num_mac = cpu_to_le16(num); + if (num) + memcpy(req->mac, mac_table, ETH_ALEN * num); + } + + status = be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); + + req->tx_flow_control = cpu_to_le16((u16)tx_fc); + req->rx_flow_control = cpu_to_le16((u16)rx_fc); + + status = be_mbox_db_ring(ctrl); + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_get_flow_control *resp = + embedded_payload(wrb); + *tx_fc = le16_to_cpu(resp->tx_flow_control); + *rx_fc = le16_to_cpu(resp->rx_flow_control); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} + +int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->cmd_lock); + + memset(wrb, 0, sizeof(*wrb)); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); + + status = be_mbox_db_ring(ctrl); + if (!status) { + struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); + *port_num = le32_to_cpu(resp->phys_port); + } + + spin_unlock(&ctrl->cmd_lock); + return status; +} diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h new file mode 100644 
index 00000000000..e499e2d5b8c --- /dev/null +++ b/drivers/net/benet/be_cmds.h @@ -0,0 +1,688 @@ +/* + * Copyright (C) 2005 - 2009 ServerEngines + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@serverengines.com + * + * ServerEngines + * 209 N. Fair Oaks Ave + * Sunnyvale, CA 94085 + */ + +/* + * The driver sends configuration and management command requests to the + * firmware in the BE. These requests are communicated to the processor + * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one + * WRB inside a MAILBOX. + * The commands are serviced by the ARM processor in the BladeEngine's MPU. + */ + +struct be_sge { + u32 pa_lo; + u32 pa_hi; + u32 len; +}; + +#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/ +#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ +#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */ +struct be_mcc_wrb { + u32 embedded; /* dword 0 */ + u32 payload_length; /* dword 1 */ + u32 tag0; /* dword 2 */ + u32 tag1; /* dword 3 */ + u32 rsvd; /* dword 4 */ + union { + u8 embedded_payload[236]; /* used by embedded cmds */ + struct be_sge sgl[19]; /* used by non-embedded cmds */ + } payload; +}; + +#define CQE_FLAGS_VALID_MASK (1 << 31) +#define CQE_FLAGS_ASYNC_MASK (1 << 30) +#define CQE_FLAGS_COMPLETED_MASK (1 << 28) +#define CQE_FLAGS_CONSUMED_MASK (1 << 27) + +/* Completion Status */ +enum { + MCC_STATUS_SUCCESS = 0x0, +/* The client does not have sufficient privileges to execute the command */ + MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1, +/* A parameter in the command was invalid.
*/ + MCC_STATUS_INVALID_PARAMETER = 0x2, +/* There are insufficient chip resources to execute the command */ + MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3, +/* The command is completing because the queue was getting flushed */ + MCC_STATUS_QUEUE_FLUSHING = 0x4, +/* The command is completing with a DMA error */ + MCC_STATUS_DMA_FAILED = 0x5 +}; + +#define CQE_STATUS_COMPL_MASK 0xFFFF +#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ +#define CQE_STATUS_EXTD_MASK 0xFFFF +#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */ + +struct be_mcc_cq_entry { + u32 status; /* dword 0 */ + u32 tag0; /* dword 1 */ + u32 tag1; /* dword 2 */ + u32 flags; /* dword 3 */ +}; + +struct be_mcc_mailbox { + struct be_mcc_wrb wrb; + struct be_mcc_cq_entry cqe; +}; + +#define CMD_SUBSYSTEM_COMMON 0x1 +#define CMD_SUBSYSTEM_ETH 0x3 + +#define OPCODE_COMMON_NTWK_MAC_QUERY 1 +#define OPCODE_COMMON_NTWK_MAC_SET 2 +#define OPCODE_COMMON_NTWK_MULTICAST_SET 3 +#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4 +#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 +#define OPCODE_COMMON_CQ_CREATE 12 +#define OPCODE_COMMON_EQ_CREATE 13 +#define OPCODE_COMMON_MCC_CREATE 21 +#define OPCODE_COMMON_NTWK_RX_FILTER 34 +#define OPCODE_COMMON_GET_FW_VERSION 35 +#define OPCODE_COMMON_SET_FLOW_CONTROL 36 +#define OPCODE_COMMON_GET_FLOW_CONTROL 37 +#define OPCODE_COMMON_SET_FRAME_SIZE 39 +#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 +#define OPCODE_COMMON_FIRMWARE_CONFIG 42 +#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 +#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 +#define OPCODE_COMMON_CQ_DESTROY 54 +#define OPCODE_COMMON_EQ_DESTROY 55 +#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 +#define OPCODE_COMMON_NTWK_PMAC_ADD 59 +#define OPCODE_COMMON_NTWK_PMAC_DEL 60 + +#define OPCODE_ETH_ACPI_CONFIG 2 +#define OPCODE_ETH_PROMISCUOUS 3 +#define OPCODE_ETH_GET_STATISTICS 4 +#define OPCODE_ETH_TX_CREATE 7 +#define OPCODE_ETH_RX_CREATE 8 +#define OPCODE_ETH_TX_DESTROY 9 +#define OPCODE_ETH_RX_DESTROY 10 + +struct be_cmd_req_hdr { + u8 opcode; /* dword 0 */ + u8 subsystem; /* dword 0 */ + u8 port_number; /* dword 0 */ + u8 domain; /* dword 0 */ + u32 timeout; /* dword 1 */ + u32 request_length; /* dword 2 */ + u32 rsvd; /* dword 3 */ +}; + +#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ +#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ +struct be_cmd_resp_hdr { + u32 info; /* dword 0 */ + u32 status; /* dword 1 */ + u32 response_length; /* dword 2 */ + u32 actual_resp_len; /* dword 3 */ +}; + +struct phys_addr { + u32 lo; + u32 hi; +}; + +/************************** + * BE Command definitions * + **************************/ + +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_eq_context { + u8 cidx[13]; /* dword 0*/ + u8 rsvd0[3]; /* dword 0*/ + u8 epidx[13]; /* dword 0*/ + u8 valid; /* dword 0*/ + u8 rsvd1; /* dword 0*/ + u8 size; /* dword 0*/ + u8 pidx[13]; /* dword 1*/ + u8 rsvd2[3]; /* dword 1*/ + u8 pd[10]; /* dword 1*/ + u8 count[3]; /* dword 1*/ + u8 solevent; /* dword 1*/ + u8 stalled; /* dword 1*/ + u8 armed; /* dword 1*/ + u8 rsvd3[4]; /* dword 2*/ + u8 func[8]; /* dword 2*/ + u8 rsvd4; /* dword 2*/ + u8 delaymult[10]; /* dword 2*/ + u8 rsvd5[2]; /* dword 2*/ + u8 phase[2]; /* dword 2*/ + u8 nodelay; /* dword 2*/ + u8 rsvd6[4]; /* dword 2*/ + u8 rsvd7[32]; /* dword 3*/ +} __packed; + +struct be_cmd_req_eq_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; /* sword */ + u16 rsvd0; /* sword */ + u8 context[sizeof(struct 
amap_eq_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_eq_create { + struct be_cmd_resp_hdr resp_hdr; + u16 eq_id; /* sword */ + u16 rsvd0; /* sword */ +} __packed; + +/******************** Mac query ***************************/ +enum { + MAC_ADDRESS_TYPE_STORAGE = 0x0, + MAC_ADDRESS_TYPE_NETWORK = 0x1, + MAC_ADDRESS_TYPE_PD = 0x2, + MAC_ADDRESS_TYPE_MANAGEMENT = 0x3 +}; + +struct mac_addr { + u16 size_of_struct; + u8 addr[ETH_ALEN]; +} __packed; + +struct be_cmd_req_mac_query { + struct be_cmd_req_hdr hdr; + u8 type; + u8 permanent; + u16 if_id; +} __packed; + +struct be_cmd_resp_mac_query { + struct be_cmd_resp_hdr hdr; + struct mac_addr mac; +}; + +/******************** PMac Add ***************************/ +struct be_cmd_req_pmac_add { + struct be_cmd_req_hdr hdr; + u32 if_id; + u8 mac_address[ETH_ALEN]; + u8 rsvd0[2]; +} __packed; + +struct be_cmd_resp_pmac_add { + struct be_cmd_resp_hdr hdr; + u32 pmac_id; +}; + +/******************** PMac Del ***************************/ +struct be_cmd_req_pmac_del { + struct be_cmd_req_hdr hdr; + u32 if_id; + u32 pmac_id; +}; + +/******************** Create CQ ***************************/ +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_cq_context { + u8 cidx[11]; /* dword 0*/ + u8 rsvd0; /* dword 0*/ + u8 coalescwm[2]; /* dword 0*/ + u8 nodelay; /* dword 0*/ + u8 epidx[11]; /* dword 0*/ + u8 rsvd1; /* dword 0*/ + u8 count[2]; /* dword 0*/ + u8 valid; /* dword 0*/ + u8 solevent; /* dword 0*/ + u8 eventable; /* dword 0*/ + u8 pidx[11]; /* dword 1*/ + u8 rsvd2; /* dword 1*/ + u8 pd[10]; /* dword 1*/ + u8 eqid[8]; /* dword 1*/ + u8 stalled; /* dword 1*/ + u8 armed; /* dword 1*/ + u8 rsvd3[4]; /* dword 2*/ + u8 func[8]; /* dword 2*/ + u8 rsvd4[20]; /* dword 2*/ + u8 rsvd5[32]; /* dword 3*/ +} __packed; + +struct be_cmd_req_cq_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 rsvd0; + u8 context[sizeof(struct amap_cq_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_cq_create { + struct be_cmd_resp_hdr hdr; + u16 cq_id; + u16 rsvd0; +} __packed; + +/******************** Create TxQ ***************************/ +#define BE_ETH_TX_RING_TYPE_STANDARD 2 +#define BE_ULP1_NUM 1 + +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_tx_context { + u8 rsvd0[16]; /* dword 0 */ + u8 tx_ring_size[4]; /* dword 0 */ + u8 rsvd1[26]; /* dword 0 */ + u8 pci_func_id[8]; /* dword 1 */ + u8 rsvd2[9]; /* dword 1 */ + u8 ctx_valid; /* dword 1 */ + u8 cq_id_send[16]; /* dword 2 */ + u8 rsvd3[16]; /* dword 2 */ + u8 rsvd4[32]; /* dword 3 */ + u8 rsvd5[32]; /* dword 4 */ + u8 rsvd6[32]; /* dword 5 */ + u8 rsvd7[32]; /* dword 6 */ + u8 rsvd8[32]; /* dword 7 */ + u8 rsvd9[32]; /* dword 8 */ + u8 rsvd10[32]; /* dword 9 */ + u8 rsvd11[32]; /* dword 10 */ + u8 rsvd12[32]; /* dword 11 */ + u8 rsvd13[32]; /* dword 12 */ + u8 rsvd14[32]; /* dword 13 */ + u8 rsvd15[32]; /* dword 14 */ + u8 rsvd16[32]; /* dword 15 */ +} __packed; + +struct be_cmd_req_eth_tx_create { + struct be_cmd_req_hdr hdr; + u8 num_pages; + u8 ulp_num; + u8 type; + u8 bound_port; + u8 context[sizeof(struct amap_tx_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_eth_tx_create { + struct be_cmd_resp_hdr hdr; + u16 cid; + u16 rsvd0; +} __packed; + +/******************** Create RxQ 
***************************/ +struct be_cmd_req_eth_rx_create { + struct be_cmd_req_hdr hdr; + u16 cq_id; + u8 frag_size; + u8 num_pages; + struct phys_addr pages[2]; + u32 interface_id; + u16 max_frame_size; + u16 rsvd0; + u32 rss_queue; +} __packed; + +struct be_cmd_resp_eth_rx_create { + struct be_cmd_resp_hdr hdr; + u16 id; + u8 cpu_id; + u8 rsvd0; +} __packed; + +/******************** Q Destroy ***************************/ +/* Type of Queue to be destroyed */ +enum { + QTYPE_EQ = 1, + QTYPE_CQ, + QTYPE_TXQ, + QTYPE_RXQ +}; + +struct be_cmd_req_q_destroy { + struct be_cmd_req_hdr hdr; + u16 id; + u16 bypass_flush; /* valid only for rx q destroy */ +} __packed; + +/************ I/f Create (it's actually I/f Config Create)**********/ + +/* Capability flags for the i/f */ +enum be_if_flags { + BE_IF_FLAGS_RSS = 0x4, + BE_IF_FLAGS_PROMISCUOUS = 0x8, + BE_IF_FLAGS_BROADCAST = 0x10, + BE_IF_FLAGS_UNTAGGED = 0x20, + BE_IF_FLAGS_ULP = 0x40, + BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80, + BE_IF_FLAGS_VLAN = 0x100, + BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, + BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, + BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 +}; + +/* An RX interface is an object with one or more MAC addresses and + * filtering capabilities. */ +struct be_cmd_req_if_create { + struct be_cmd_req_hdr hdr; + u32 version; /* ignore currntly */ + u32 capability_flags; + u32 enable_flags; + u8 mac_addr[ETH_ALEN]; + u8 rsvd0; + u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */ + u32 vlan_tag; /* not used currently */ +} __packed; + +struct be_cmd_resp_if_create { + struct be_cmd_resp_hdr hdr; + u32 interface_id; + u32 pmac_id; +}; + +/****** I/f Destroy(it's actually I/f Config Destroy )**********/ +struct be_cmd_req_if_destroy { + struct be_cmd_req_hdr hdr; + u32 interface_id; +}; + +/*************** HW Stats Get **********************************/ +struct be_port_rxf_stats { + u32 rx_bytes_lsd; /* dword 0*/ + u32 rx_bytes_msd; /* dword 1*/ + u32 rx_total_frames; /* dword 2*/ + u32 rx_unicast_frames; /* dword 3*/ + u32 rx_multicast_frames; /* dword 4*/ + u32 rx_broadcast_frames; /* dword 5*/ + u32 rx_crc_errors; /* dword 6*/ + u32 rx_alignment_symbol_errors; /* dword 7*/ + u32 rx_pause_frames; /* dword 8*/ + u32 rx_control_frames; /* dword 9*/ + u32 rx_in_range_errors; /* dword 10*/ + u32 rx_out_range_errors; /* dword 11*/ + u32 rx_frame_too_long; /* dword 12*/ + u32 rx_address_match_errors; /* dword 13*/ + u32 rx_vlan_mismatch; /* dword 14*/ + u32 rx_dropped_too_small; /* dword 15*/ + u32 rx_dropped_too_short; /* dword 16*/ + u32 rx_dropped_header_too_small; /* dword 17*/ + u32 rx_dropped_tcp_length; /* dword 18*/ + u32 rx_dropped_runt; /* dword 19*/ + u32 rx_64_byte_packets; /* dword 20*/ + u32 rx_65_127_byte_packets; /* dword 21*/ + u32 rx_128_256_byte_packets; /* dword 22*/ + u32 rx_256_511_byte_packets; /* dword 23*/ + u32 rx_512_1023_byte_packets; /* dword 24*/ + u32 rx_1024_1518_byte_packets; /* dword 25*/ + u32 rx_1519_2047_byte_packets; /* dword 26*/ + u32 rx_2048_4095_byte_packets; /* dword 27*/ + u32 rx_4096_8191_byte_packets; /* dword 28*/ + u32 rx_8192_9216_byte_packets; /* dword 29*/ + u32 rx_ip_checksum_errs; /* dword 30*/ + u32 rx_tcp_checksum_errs; /* dword 31*/ + u32 rx_udp_checksum_errs; /* dword 32*/ + u32 rx_non_rss_packets; /* dword 33*/ + u32 rx_ipv4_packets; /* dword 34*/ + u32 rx_ipv6_packets; /* dword 35*/ + u32 rx_ipv4_bytes_lsd; /* dword 36*/ + u32 rx_ipv4_bytes_msd; /* dword 37*/ + u32 rx_ipv6_bytes_lsd; /* dword 38*/ + u32 rx_ipv6_bytes_msd; /* dword 39*/ + u32 
rx_chute1_packets; /* dword 40*/ + u32 rx_chute2_packets; /* dword 41*/ + u32 rx_chute3_packets; /* dword 42*/ + u32 rx_management_packets; /* dword 43*/ + u32 rx_switched_unicast_packets; /* dword 44*/ + u32 rx_switched_multicast_packets; /* dword 45*/ + u32 rx_switched_broadcast_packets; /* dword 46*/ + u32 tx_bytes_lsd; /* dword 47*/ + u32 tx_bytes_msd; /* dword 48*/ + u32 tx_unicastframes; /* dword 49*/ + u32 tx_multicastframes; /* dword 50*/ + u32 tx_broadcastframes; /* dword 51*/ + u32 tx_pauseframes; /* dword 52*/ + u32 tx_controlframes; /* dword 53*/ + u32 tx_64_byte_packets; /* dword 54*/ + u32 tx_65_127_byte_packets; /* dword 55*/ + u32 tx_128_256_byte_packets; /* dword 56*/ + u32 tx_256_511_byte_packets; /* dword 57*/ + u32 tx_512_1023_byte_packets; /* dword 58*/ + u32 tx_1024_1518_byte_packets; /* dword 59*/ + u32 tx_1519_2047_byte_packets; /* dword 60*/ + u32 tx_2048_4095_byte_packets; /* dword 61*/ + u32 tx_4096_8191_byte_packets; /* dword 62*/ + u32 tx_8192_9216_byte_packets; /* dword 63*/ + u32 rx_fifo_overflow; /* dword 64*/ + u32 rx_input_fifo_overflow; /* dword 65*/ +}; + +struct be_rxf_stats { + struct be_port_rxf_stats port[2]; + u32 rx_drops_no_pbuf; /* dword 132*/ + u32 rx_drops_no_txpb; /* dword 133*/ + u32 rx_drops_no_erx_descr; /* dword 134*/ + u32 rx_drops_no_tpre_descr; /* dword 135*/ + u32 management_rx_port_packets; /* dword 136*/ + u32 management_rx_port_bytes; /* dword 137*/ + u32 management_rx_port_pause_frames; /* dword 138*/ + u32 management_rx_port_errors; /* dword 139*/ + u32 management_tx_port_packets; /* dword 140*/ + u32 management_tx_port_bytes; /* dword 141*/ + u32 management_tx_port_pause; /* dword 142*/ + u32 management_rx_port_rxfifo_overflow; /* dword 143*/ + u32 rx_drops_too_many_frags; /* dword 144*/ + u32 rx_drops_invalid_ring; /* dword 145*/ + u32 forwarded_packets; /* dword 146*/ + u32 rx_drops_mtu; /* dword 147*/ + u32 rsvd0[15]; +}; + +struct be_erx_stats { + u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/ + u32 debug_wdma_sent_hold; /* dword 44*/ + u32 debug_wdma_pbfree_sent_hold; /* dword 45*/ + u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/ + u32 debug_pmem_pbuf_dealloc; /* dword 47*/ +}; + +struct be_hw_stats { + struct be_rxf_stats rxf; + u32 rsvd[48]; + struct be_erx_stats erx; +}; + +struct be_cmd_req_get_stats { + struct be_cmd_req_hdr hdr; + u8 rsvd[sizeof(struct be_hw_stats)]; +}; + +struct be_cmd_resp_get_stats { + struct be_cmd_resp_hdr hdr; + struct be_hw_stats hw_stats; +}; + +struct be_cmd_req_vlan_config { + struct be_cmd_req_hdr hdr; + u8 interface_id; + u8 promiscuous; + u8 untagged; + u8 num_vlan; + u16 normal_vlan[64]; +} __packed; + +struct be_cmd_req_promiscuous_config { + struct be_cmd_req_hdr hdr; + u8 port0_promiscuous; + u8 port1_promiscuous; + u16 rsvd0; +} __packed; + +struct macaddr { + u8 byte[ETH_ALEN]; +}; + +struct be_cmd_req_mcast_mac_config { + struct be_cmd_req_hdr hdr; + u16 num_mac; + u8 promiscuous; + u8 interface_id; + struct macaddr mac[32]; +} __packed; + +static inline struct be_hw_stats * +hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd) +{ + return &cmd->hw_stats; +} + +/******************** Link Status Query *******************/ +struct be_cmd_req_link_status { + struct be_cmd_req_hdr hdr; + u32 rsvd; +}; + +struct be_link_info { + u8 duplex; + u8 speed; + u8 fault; +}; + +enum { + PHY_LINK_DUPLEX_NONE = 0x0, + PHY_LINK_DUPLEX_HALF = 0x1, + PHY_LINK_DUPLEX_FULL = 0x2 +}; + +enum { + PHY_LINK_SPEED_ZERO = 0x0, /* => No link */ + PHY_LINK_SPEED_10MBPS = 0x1, + 
PHY_LINK_SPEED_100MBPS = 0x2, + PHY_LINK_SPEED_1GBPS = 0x3, + PHY_LINK_SPEED_10GBPS = 0x4 +}; + +struct be_cmd_resp_link_status { + struct be_cmd_resp_hdr hdr; + u8 physical_port; + u8 mac_duplex; + u8 mac_speed; + u8 mac_fault; + u8 mgmt_mac_duplex; + u8 mgmt_mac_speed; + u16 rsvd0; +} __packed; + +/******************** Get FW Version *******************/ +#define FW_VER_LEN 32 +struct be_cmd_req_get_fw_version { + struct be_cmd_req_hdr hdr; + u8 rsvd0[FW_VER_LEN]; + u8 rsvd1[FW_VER_LEN]; +} __packed; + +struct be_cmd_resp_get_fw_version { + struct be_cmd_resp_hdr hdr; + u8 firmware_version_string[FW_VER_LEN]; + u8 fw_on_flash_version_string[FW_VER_LEN]; +} __packed; + +/******************** Set Flow Control *******************/ +struct be_cmd_req_set_flow_control { + struct be_cmd_req_hdr hdr; + u16 tx_flow_control; + u16 rx_flow_control; +} __packed; + +/******************** Get Flow Control *******************/ +struct be_cmd_req_get_flow_control { + struct be_cmd_req_hdr hdr; + u32 rsvd; +}; + +struct be_cmd_resp_get_flow_control { + struct be_cmd_resp_hdr hdr; + u16 tx_flow_control; + u16 rx_flow_control; +} __packed; + +/******************** Modify EQ Delay *******************/ +struct be_cmd_req_modify_eq_delay { + struct be_cmd_req_hdr hdr; + u32 num_eq; + struct { + u32 eq_id; + u32 phase; + u32 delay_multiplier; + } delay[8]; +} __packed; + +struct be_cmd_resp_modify_eq_delay { + struct be_cmd_resp_hdr hdr; + u32 rsvd0; +} __packed; + +/******************** Get FW Config *******************/ +struct be_cmd_req_query_fw_cfg { + struct be_cmd_req_hdr hdr; + u32 rsvd[30]; +}; + +struct be_cmd_resp_query_fw_cfg { + struct be_cmd_resp_hdr hdr; + u32 be_config_number; + u32 asic_revision; + u32 phys_port; + u32 function_mode; + u32 rsvd[26]; +};
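+ +/* Worked example of the pseudo-amap convention used above (illustrative note, not part of the original header): in struct amap_eq_context each u8 stands for one bit, so "valid" lies 13 + 3 + 13 = 29 bytes into the pseudo struct. AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1) therefore resolves to dword index offsetof()/32 == 0, mask (1 << sizeof(u8)) - 1 == 0x1 and bit offset 29, i.e. it sets bit 29 of dword 0 of the real 16-byte EQ context. */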
+ +extern int be_pci_fnum_get(struct be_ctrl_info *ctrl); +extern int be_cmd_POST(struct be_ctrl_info *ctrl); +extern int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr, + u8 type, bool permanent, u32 if_handle); +extern int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr, + u32 if_id, u32 *pmac_id); +extern int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id); +extern int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 if_flags, u8 *mac, + bool pmac_invalid, u32 *if_handle, u32 *pmac_id); +extern int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 if_handle); +extern int be_cmd_eq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *eq, int eq_delay); +extern int be_cmd_cq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *cq, struct be_queue_info *eq, + bool sol_evts, bool no_delay, + int num_cqe_dma_coalesce); +extern int be_cmd_txq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *txq, + struct be_queue_info *cq); +extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl, + struct be_queue_info *rxq, u16 cq_id, + u16 frag_size, u16 max_frame_size, u32 if_id, + u32 rss); +extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, + int type); +extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl, + struct be_link_info *link); +extern int be_cmd_reset(struct be_ctrl_info *ctrl); +extern int be_cmd_get_stats(struct be_ctrl_info *ctrl, + struct be_dma_mem *nonemb_cmd); +extern int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver); + +extern int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd); +extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, + u16 *vtag_array, u32 num, bool untagged, + bool promiscuous); +extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, + u8 port_num, bool en); +extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, + u8 *mac_table, u32 num, bool promiscuous); +extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, + u32 tx_fc, u32 rx_fc); +extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, + u32 *tx_fc, u32 *rx_fc); +extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c new file mode 100644 index 00000000000..04f4b73fa8d --- /dev/null +++ b/drivers/net/benet/be_ethtool.c @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2005 - 2009 ServerEngines + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@serverengines.com + * + * ServerEngines + * 209 N. Fair Oaks Ave + * Sunnyvale, CA 94085 + */ + +#include "be.h" +#include <linux/ethtool.h> + +struct be_ethtool_stat { + char desc[ETH_GSTRING_LEN]; + int type; + int size; + int offset; +}; + +enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT}; +#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ + offsetof(_struct, field) +#define NETSTAT_INFO(field) #field, NETSTAT,\ + FIELDINFO(struct net_device_stats,\ + field) +#define DRVSTAT_INFO(field) #field, DRVSTAT,\ + FIELDINFO(struct be_drvr_stats, field) +#define MISCSTAT_INFO(field) #field, MISCSTAT,\ + FIELDINFO(struct be_rxf_stats, field) +#define PORTSTAT_INFO(field) #field, PORTSTAT,\ + FIELDINFO(struct be_port_rxf_stats, \ + field) +#define ERXSTAT_INFO(field) #field, ERXSTAT,\ + FIELDINFO(struct be_erx_stats, field) + +static const struct be_ethtool_stat et_stats[] = { + {NETSTAT_INFO(rx_packets)}, + {NETSTAT_INFO(tx_packets)}, + {NETSTAT_INFO(rx_bytes)}, + {NETSTAT_INFO(tx_bytes)}, + {NETSTAT_INFO(rx_errors)}, + {NETSTAT_INFO(tx_errors)}, + {NETSTAT_INFO(rx_dropped)}, + {NETSTAT_INFO(tx_dropped)}, + {DRVSTAT_INFO(be_tx_reqs)}, + {DRVSTAT_INFO(be_tx_stops)}, + {DRVSTAT_INFO(be_fwd_reqs)}, + {DRVSTAT_INFO(be_tx_wrbs)}, + {DRVSTAT_INFO(be_polls)}, + {DRVSTAT_INFO(be_tx_events)}, + {DRVSTAT_INFO(be_rx_events)}, + {DRVSTAT_INFO(be_tx_compl)}, + {DRVSTAT_INFO(be_rx_compl)}, + {DRVSTAT_INFO(be_ethrx_post_fail)}, + {DRVSTAT_INFO(be_802_3_dropped_frames)}, + {DRVSTAT_INFO(be_802_3_malformed_frames)}, + {DRVSTAT_INFO(be_tx_rate)}, + {DRVSTAT_INFO(be_rx_rate)}, + {PORTSTAT_INFO(rx_unicast_frames)}, + {PORTSTAT_INFO(rx_multicast_frames)}, + {PORTSTAT_INFO(rx_broadcast_frames)}, + {PORTSTAT_INFO(rx_crc_errors)}, + {PORTSTAT_INFO(rx_alignment_symbol_errors)}, + {PORTSTAT_INFO(rx_pause_frames)}, + {PORTSTAT_INFO(rx_control_frames)}, + {PORTSTAT_INFO(rx_in_range_errors)}, + {PORTSTAT_INFO(rx_out_range_errors)}, + {PORTSTAT_INFO(rx_frame_too_long)}, + {PORTSTAT_INFO(rx_address_match_errors)}, + {PORTSTAT_INFO(rx_vlan_mismatch)}, + {PORTSTAT_INFO(rx_dropped_too_small)}, + {PORTSTAT_INFO(rx_dropped_too_short)}, + {PORTSTAT_INFO(rx_dropped_header_too_small)}, + {PORTSTAT_INFO(rx_dropped_tcp_length)}, + {PORTSTAT_INFO(rx_dropped_runt)}, + {PORTSTAT_INFO(rx_fifo_overflow)}, + {PORTSTAT_INFO(rx_input_fifo_overflow)}, + {PORTSTAT_INFO(rx_ip_checksum_errs)}, + {PORTSTAT_INFO(rx_tcp_checksum_errs)}, + {PORTSTAT_INFO(rx_udp_checksum_errs)}, + {PORTSTAT_INFO(rx_non_rss_packets)}, + {PORTSTAT_INFO(rx_ipv4_packets)}, + {PORTSTAT_INFO(rx_ipv6_packets)}, + {PORTSTAT_INFO(tx_unicastframes)}, + {PORTSTAT_INFO(tx_multicastframes)}, + {PORTSTAT_INFO(tx_broadcastframes)}, + {PORTSTAT_INFO(tx_pauseframes)}, + {PORTSTAT_INFO(tx_controlframes)}, + {MISCSTAT_INFO(rx_drops_no_pbuf)}, + {MISCSTAT_INFO(rx_drops_no_txpb)}, + {MISCSTAT_INFO(rx_drops_no_erx_descr)}, + {MISCSTAT_INFO(rx_drops_no_tpre_descr)}, + {MISCSTAT_INFO(rx_drops_too_many_frags)}, + {MISCSTAT_INFO(rx_drops_invalid_ring)}, + {MISCSTAT_INFO(forwarded_packets)}, + {MISCSTAT_INFO(rx_drops_mtu)}, + {ERXSTAT_INFO(rx_drops_no_fragments)}, +}; +#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
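+ +/* Each et_stats[] entry above is generated by the *STAT_INFO macros: {DRVSTAT_INFO(be_tx_reqs)}, for example, expands to { "be_tx_reqs", DRVSTAT, FIELD_SIZEOF(struct be_drvr_stats, be_tx_reqs), offsetof(struct be_drvr_stats, be_tx_reqs) }, so be_get_ethtool_stats() below only has to pick a base pointer by type and read size bytes at the stored offset (descriptive note, not in the original source). */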
+ +static void +be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + strcpy(drvinfo->driver, DRV_NAME); + strcpy(drvinfo->version, DRV_VER); + strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN); + strcpy(drvinfo->bus_info, pci_name(adapter->pdev)); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static int +be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + + coalesce->rx_max_coalesced_frames = adapter->max_rx_coal; + + coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; + coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd; + coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd; + + coalesce->tx_coalesce_usecs = tx_eq->cur_eqd; + coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd; + coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd; + + coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic; + coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic; + + return 0; +} + +/* + * This routine is used to set interrupt coalescing delay *as well as* + * the number of pkts to coalesce for LRO.
+ */ +static int +be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_ctrl_info *ctrl = &adapter->ctrl; + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + u32 tx_max, tx_min, tx_cur; + u32 rx_max, rx_min, rx_cur; + int status = 0; + + if (coalesce->use_adaptive_tx_coalesce == 1) + return -EINVAL; + + adapter->max_rx_coal = coalesce->rx_max_coalesced_frames; + if (adapter->max_rx_coal > MAX_SKB_FRAGS) + adapter->max_rx_coal = MAX_SKB_FRAGS - 1; + + /* if AIC is being turned on now, start with an EQD of 0 */ + if (rx_eq->enable_aic == 0 && + coalesce->use_adaptive_rx_coalesce == 1) { + rx_eq->cur_eqd = 0; + } + rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce; + + rx_max = coalesce->rx_coalesce_usecs_high; + rx_min = coalesce->rx_coalesce_usecs_low; + rx_cur = coalesce->rx_coalesce_usecs; + + tx_max = coalesce->tx_coalesce_usecs_high; + tx_min = coalesce->tx_coalesce_usecs_low; + tx_cur = coalesce->tx_coalesce_usecs; + + if (tx_cur > BE_MAX_EQD) + tx_cur = BE_MAX_EQD; + if (tx_eq->cur_eqd != tx_cur) { + status = be_cmd_modify_eqd(ctrl, tx_eq->q.id, tx_cur); + if (!status) + tx_eq->cur_eqd = tx_cur; + } + + if (rx_eq->enable_aic) { + if (rx_max > BE_MAX_EQD) + rx_max = BE_MAX_EQD; + if (rx_min > rx_max) + rx_min = rx_max; + rx_eq->max_eqd = rx_max; + rx_eq->min_eqd = rx_min; + if (rx_eq->cur_eqd > rx_max) + rx_eq->cur_eqd = rx_max; + if (rx_eq->cur_eqd < rx_min) + rx_eq->cur_eqd = rx_min; + } else { + if (rx_cur > BE_MAX_EQD) + rx_cur = BE_MAX_EQD; + if (rx_eq->cur_eqd != rx_cur) { + status = be_cmd_modify_eqd(ctrl, rx_eq->q.id, rx_cur); + if (!status) + rx_eq->cur_eqd = rx_cur; + } + } + return 0; +} + +static u32 be_get_rx_csum(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + return adapter->rx_csum; +} + +static int be_set_rx_csum(struct net_device *netdev, uint32_t data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->rx_csum = true; + else + adapter->rx_csum = false; + + return 0; +} + +static void +be_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats; + struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); + struct be_rxf_stats *rxf_stats = &hw_stats->rxf; + struct be_port_rxf_stats *port_stats = + &rxf_stats->port[adapter->port_num]; + struct net_device_stats *net_stats = &adapter->stats.net_stats; + struct be_erx_stats *erx_stats = &hw_stats->erx; + void *p = NULL; + int i; + + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + switch (et_stats[i].type) { + case NETSTAT: + p = net_stats; + break; + case DRVSTAT: + p = drvr_stats; + break; + case PORTSTAT: + p = port_stats; + break; + case MISCSTAT: + p = rxf_stats; + break; + case ERXSTAT: /* Currently only one ERX stat is provided */ + p = (u32 *)erx_stats + adapter->rx_obj.q.id; + break; + } + + p = (u8 *)p + et_stats[i].offset; + data[i] = (et_stats[i].size == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +static void +be_get_stat_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int be_get_stats_count(struct net_device *netdev) +{ + return ETHTOOL_STATS_NUM; +} + +static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + return 0; +} + +static void +be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = adapter->rx_obj.q.len; + ring->tx_max_pending = adapter->tx_obj.q.len; + + ring->rx_pending = atomic_read(&adapter->rx_obj.q.used); + ring->tx_pending = atomic_read(&adapter->tx_obj.q.used); +} + +static void +be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, + &ecmd->rx_pause); + ecmd->autoneg = AUTONEG_ENABLE; +} + +static int +be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, + ecmd->rx_pause); + if (status) + dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); + + return status; +} + +struct ethtool_ops be_ethtool_ops = { + .get_settings = be_get_settings, + .get_drvinfo = be_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_coalesce = be_get_coalesce, + .set_coalesce = be_set_coalesce, + .get_ringparam = be_get_ringparam, + .get_pauseparam = be_get_pauseparam, + .set_pauseparam = be_set_pauseparam, + .get_rx_csum = be_get_rx_csum, + .set_rx_csum = be_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, + .get_strings = be_get_stat_strings, + .get_stats_count = be_get_stats_count, + .get_ethtool_stats = be_get_ethtool_stats, +}; diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h new file mode 100644 index 00000000000..b132aa4893c --- /dev/null +++ b/drivers/net/benet/be_hw.h @@ -0,0 +1,211 @@ +/* + * Copyright (C) 2005 - 2009 ServerEngines + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@serverengines.com + * + * ServerEngines + * 209 N. Fair Oaks Ave + * Sunnyvale, CA 94085 + */ + +/********* Mailbox door bell *************/ +/* Used for driver communication with the FW. + * The software must write this register twice to post any command. First, + * it writes the register with hi=1 and the upper bits of the physical address + * for the MAILBOX structure. Software must poll the ready bit until this + * is acknowledged. Then, software writes the register with hi=0 with the lower + * bits in the address. It must poll the ready bit until the command is + * complete. Upon completion, the MAILBOX will contain a valid completion + * queue entry. + */ +#define MPU_MAILBOX_DB_OFFSET 0x160 +#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ +#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
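+ +/* Illustrative walk-through of the split (a note, not part of the original file): the mailbox DMA address must be at least 16-byte aligned, since address bits 3:0 are never written. be_mbox_db_ring() first posts hi=1 with address bits 63:34 placed in register bits 31:2, polls the ready bit, then posts hi=0 with address bits 33:4 in bits 31:2; the completion entry is valid only after this second write is acknowledged. */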
+ +#define MPU_EP_CONTROL 0 + +/********** MPU semaphore ******************/ +#define MPU_EP_SEMAPHORE_OFFSET 0xac +#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF +#define EP_SEMAPHORE_POST_ERR_MASK 0x1 +#define EP_SEMAPHORE_POST_ERR_SHIFT 31 +/* MPU semaphore POST stage values */ +#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */ +#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */ +#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ +#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ + +/********* Memory BAR register ************/ +#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc +/* Host Interrupt Enable; if set, interrupts are enabled, although "PCI Interrupt + * Disable" may still globally block interrupts in addition to individual + * interrupt masks; a mechanism for the device driver to block all interrupts + * atomically without having to arbitrate for the PCI Interrupt Disable bit + * with the OS. + */ +#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ +/* PCI physical function number */ +#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */ +#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26 + +/********* Event Q door bell *************/ +#define DB_EQ_OFFSET DB_CQ_OFFSET +#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ +/* Clear the interrupt for this eq */ +#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ +/* Must be 1 */ +#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +/* Number of event entries processed */ +#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_EQ_REARM_SHIFT (29) /* bit 29 */ + +/********* Compl Q door bell *************/ +#define DB_CQ_OFFSET 0x120 +#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +/* Number of event entries processed */ +#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ + +/********** TX ULP door bell *************/ +#define DB_TXULP1_OFFSET 0x60 +#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */ +/* Number of tx entries posted */ +#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ +#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */ + +/********** RQ(erx) door bell ************/ +#define DB_RQ_OFFSET 0x100 +#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +/* Number of rx frags posted */ +#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ + +/* + * BE descriptors: host memory data structures whose formats + * are hardwired in BE silicon.
+ */ +/* Event Queue Descriptor */ +#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */ +#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */ +#define EQ_ENTRY_RES_ID_SHIFT 16 +struct be_eq_entry { + u32 evt; +}; + +/* TX Queue Descriptor */ +#define ETH_WRB_FRAG_LEN_MASK 0xFFFF +struct be_eth_wrb { + u32 frag_pa_hi; /* dword 0 */ + u32 frag_pa_lo; /* dword 1 */ + u32 rsvd0; /* dword 2 */ + u32 frag_len; /* dword 3: bits 0 - 15 */ +} __packed; + +/* Pseudo amap definition for eth_hdr_wrb in which each bit of the + * actual structure is defined as a byte : used to calculate + * offset/shift/mask of each field */ +struct amap_eth_hdr_wrb { + u8 rsvd0[32]; /* dword 0 */ + u8 rsvd1[32]; /* dword 1 */ + u8 complete; /* dword 2 */ + u8 event; + u8 crc; + u8 forward; + u8 ipsec; + u8 mgmt; + u8 ipcs; + u8 udpcs; + u8 tcpcs; + u8 lso; + u8 vlan; + u8 gso[2]; + u8 num_wrb[5]; + u8 lso_mss[14]; + u8 len[16]; /* dword 3 */ + u8 vlan_tag[16]; +} __packed; + +struct be_eth_hdr_wrb { + u32 dw[4]; +}; + +/* TX Compl Queue Descriptor */ + +/* Pseudo amap definition for eth_tx_compl in which each bit of the + * actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_tx_compl { + u8 wrb_index[16]; /* dword 0 */ + u8 ct[2]; /* dword 0 */ + u8 port[2]; /* dword 0 */ + u8 rsvd0[8]; /* dword 0 */ + u8 status[4]; /* dword 0 */ + u8 user_bytes[16]; /* dword 1 */ + u8 nwh_bytes[8]; /* dword 1 */ + u8 lso; /* dword 1 */ + u8 cast_enc[2]; /* dword 1 */ + u8 rsvd1[5]; /* dword 1 */ + u8 rsvd2[32]; /* dword 2 */ + u8 pkts[16]; /* dword 3 */ + u8 ringid[11]; /* dword 3 */ + u8 hash_val[4]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + +struct be_eth_tx_compl { + u32 dw[4]; +}; + +/* RX Queue Descriptor */ +struct be_eth_rx_d { + u32 fragpa_hi; + u32 fragpa_lo; +}; + +/* RX Compl Queue Descriptor */ + +/* Pseudo amap definition for eth_rx_compl in which each bit of the + * actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_rx_compl { + u8 vlan_tag[16]; /* dword 0 */ + u8 pktsize[14]; /* dword 0 */ + u8 port; /* dword 0 */ + u8 ip_opt; /* dword 0 */ + u8 err; /* dword 1 */ + u8 rsshp; /* dword 1 */ + u8 ipf; /* dword 1 */ + u8 tcpf; /* dword 1 */ + u8 udpf; /* dword 1 */ + u8 ipcksm; /* dword 1 */ + u8 l4_cksm; /* dword 1 */ + u8 ip_version; /* dword 1 */ + u8 macdst[6]; /* dword 1 */ + u8 vtp; /* dword 1 */ + u8 rsvd0; /* dword 1 */ + u8 fragndx[10]; /* dword 1 */ + u8 ct[2]; /* dword 1 */ + u8 sw; /* dword 1 */ + u8 numfrags[3]; /* dword 1 */ + u8 rss_flush; /* dword 2 */ + u8 cast_enc[2]; /* dword 2 */ + u8 qnq; /* dword 2 */ + u8 rss_bank; /* dword 2 */ + u8 rsvd1[23]; /* dword 2 */ + u8 lro_pkt; /* dword 2 */ + u8 rsvd2[2]; /* dword 2 */ + u8 valid; /* dword 2 */ + u8 rsshash[32]; /* dword 3 */ +} __packed; + +struct be_eth_rx_compl { + u32 dw[4]; +}; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c new file mode 100644 index 00000000000..897a63de5bd --- /dev/null +++ b/drivers/net/benet/be_main.c @@ -0,0 +1,1903 @@ +/* + * Copyright (C) 2005 - 2009 ServerEngines + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@serverengines.com + * + * ServerEngines + * 209 N. 
Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+
+#include "be.h"
+
+MODULE_VERSION(DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_frag_size = 2048;
+module_param(rx_frag_size, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
+#define BE_VENDOR_ID		0x19a2
+#define BE2_DEVICE_ID_1		0x0211
+static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
+	{ PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, be_dev_ids);
+
+static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
+{
+	struct be_dma_mem *mem = &q->dma_mem;
+	if (mem->va)
+		pci_free_consistent(adapter->pdev, mem->size,
+			mem->va, mem->dma);
+}
+
+static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
+		u16 len, u16 entry_size)
+{
+	struct be_dma_mem *mem = &q->dma_mem;
+
+	memset(q, 0, sizeof(*q));
+	q->len = len;
+	q->entry_size = entry_size;
+	mem->size = len * entry_size;
+	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+	if (!mem->va)
+		return -1;
+	memset(mem->va, 0, mem->size);
+	return 0;
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+	return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+	return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+	index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+	index_inc(&q->tail, q->len);
+}
+
+static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
+{
+	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
+	u32 reg = ioread32(addr);
+	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+	if (!enabled && enable) {
+		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+	} else if (enabled && !enable) {
+		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+	} else {
+		printk(KERN_WARNING DRV_NAME
+			": bad value in membar_int_ctrl reg=0x%x\n", reg);
+		return;
+	}
+	iowrite32(reg, addr);
+}
+
+static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
+{
+	u32 val = 0;
+	val |= qid & DB_RQ_RING_ID_MASK;
+	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+	iowrite32(val, ctrl->db + DB_RQ_OFFSET);
+}
+
+static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
+{
+	u32 val = 0;
+	val |= qid & DB_TXULP_RING_ID_MASK;
+	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+	iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
+}
+
+static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
+		bool arm, bool clear_int, u16 num_popped)
+{
+	u32 val = 0;
+	val |= qid & DB_EQ_RING_ID_MASK;
+	if (arm)
+		val |= 1 << DB_EQ_REARM_SHIFT;
+	if (clear_int)
+		val |= 1 << DB_EQ_CLR_SHIFT;
+	val |= 1 << DB_EQ_EVNT_SHIFT;
+	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
+}
+
+static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+		bool arm, u16 num_popped)
+{
+	u32 val = 0;
+	val |= qid & DB_CQ_RING_ID_MASK;
+	if (arm)
+		val |= 1 << DB_CQ_REARM_SHIFT;
+	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
+	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
+}
+
+
+static int be_mac_addr_set(struct net_device *netdev, void *p)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	int status = 0;
+
+	if (netif_running(netdev)) {
+		status =
be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
+				adapter->pmac_id);
+		if (status)
+			return status;
+
+		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
+				adapter->if_handle, &adapter->pmac_id);
+	}
+
+	if (!status)
+		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	return status;
+}
+
+static void netdev_stats_update(struct be_adapter *adapter)
+{
+	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
+	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
+	struct be_port_rxf_stats *port_stats =
+			&rxf_stats->port[adapter->port_num];
+	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
+
+	dev_stats->rx_packets = port_stats->rx_total_frames;
+	dev_stats->tx_packets = port_stats->tx_unicastframes +
+		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
+	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
+				(u64) port_stats->rx_bytes_lsd;
+	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
+				(u64) port_stats->tx_bytes_lsd;
+
+	/* bad pkts received */
+	dev_stats->rx_errors = port_stats->rx_crc_errors +
+		port_stats->rx_alignment_symbol_errors +
+		port_stats->rx_in_range_errors +
+		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
+
+	/* packet transmit problems */
+	dev_stats->tx_errors = 0;
+
+	/* no space in linux buffers */
+	dev_stats->rx_dropped = 0;
+
+	/* no space available in linux */
+	dev_stats->tx_dropped = 0;
+
+	dev_stats->multicast = port_stats->tx_multicastframes;
+	dev_stats->collisions = 0;
+
+	/* detailed rx errors */
+	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
+		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
+	/* receive ring buffer overflow */
+	dev_stats->rx_over_errors = 0;
+	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
+
+	/* frame alignment errors */
+	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
+	/* receiver fifo overrun */
+	/* drops_no_pbuf is not per i/f, it is per BE card */
+	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
+		port_stats->rx_input_fifo_overflow +
+		rxf_stats->rx_drops_no_pbuf;
+	/* receiver missed packets */
+	dev_stats->rx_missed_errors = 0;
+	/* detailed tx_errors */
+	dev_stats->tx_aborted_errors = 0;
+	dev_stats->tx_carrier_errors = 0;
+	dev_stats->tx_fifo_errors = 0;
+	dev_stats->tx_heartbeat_errors = 0;
+	dev_stats->tx_window_errors = 0;
+}
+
+static void be_link_status_update(struct be_adapter *adapter)
+{
+	struct be_link_info *prev = &adapter->link;
+	struct be_link_info now = { 0 };
+	struct net_device *netdev = adapter->netdev;
+
+	be_cmd_link_status_query(&adapter->ctrl, &now);
+
+	/* If link came up or went down */
+	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
+			prev->speed == PHY_LINK_SPEED_ZERO)) {
+		if (now.speed == PHY_LINK_SPEED_ZERO) {
+			netif_stop_queue(netdev);
+			netif_carrier_off(netdev);
+			printk(KERN_INFO "%s: Link down\n", netdev->name);
+		} else {
+			netif_start_queue(netdev);
+			netif_carrier_on(netdev);
+			printk(KERN_INFO "%s: Link up\n", netdev->name);
+		}
+	}
+	*prev = now;
+}
+
+/* Update the EQ delay in BE based on the RX frags consumed per second */
+static void be_rx_eqd_update(struct be_adapter *adapter)
+{
+	u32 eqd;
+	struct be_ctrl_info *ctrl = &adapter->ctrl;
+	struct be_eq_obj *rx_eq = &adapter->rx_eq;
+	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
+
+	/* Update once a second */
+	if (((jiffies - stats->rx_fps_jiffies) < HZ) || rx_eq->enable_aic == 0)
+		return;
+
+	stats->be_rx_fps =
(stats->be_rx_frags - stats->be_prev_rx_frags) / + ((jiffies - stats->rx_fps_jiffies) / HZ); + + stats->rx_fps_jiffies = jiffies; + stats->be_prev_rx_frags = stats->be_rx_frags; + eqd = stats->be_rx_fps / 110000; + eqd = eqd << 3; + if (eqd > rx_eq->max_eqd) + eqd = rx_eq->max_eqd; + if (eqd < rx_eq->min_eqd) + eqd = rx_eq->min_eqd; + if (eqd < 10) + eqd = 0; + if (eqd != rx_eq->cur_eqd) + be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd); + + rx_eq->cur_eqd = eqd; +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + int status; + + /* Check link */ + be_link_status_update(adapter); + + /* Get Stats */ + status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd); + if (!status) + netdev_stats_update(adapter); + + /* Set EQ delay */ + be_rx_eqd_update(adapter); + + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + +static struct net_device_stats *be_get_stats(struct net_device *dev) +{ + struct be_adapter *adapter = netdev_priv(dev); + + return &adapter->stats.net_stats; +} + +static void be_tx_stats_update(struct be_adapter *adapter, + u32 wrb_cnt, u32 copied, bool stopped) +{ + struct be_drvr_stats *stats = &adapter->stats.drvr_stats; + stats->be_tx_reqs++; + stats->be_tx_wrbs += wrb_cnt; + stats->be_tx_bytes += copied; + if (stopped) + stats->be_tx_stops++; + + /* Update tx rate once in two seconds */ + if ((jiffies - stats->be_tx_jiffies) > 2 * HZ) { + u32 r; + r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) / + ((u32) (jiffies - stats->be_tx_jiffies) / HZ); + r = (r / 1000000); /* M bytes/s */ + stats->be_tx_rate = (r * 8); /* M bits/s */ + stats->be_tx_jiffies = jiffies; + stats->be_tx_bytes_prev = stats->be_tx_bytes; + } +} + +/* Determine number of WRB entries needed to xmit data in an skb */ +static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) +{ + int cnt = 0; + while (skb) { + if (skb->len > skb->data_len) + cnt++; + cnt += skb_shinfo(skb)->nr_frags; + skb = skb_shinfo(skb)->frag_list; + } + /* to account for hdr wrb */ + cnt++; + if (cnt & 1) { + /* add a dummy to make it an even num */ + cnt++; + *dummy = true; + } else + *dummy = false; + BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); + return cnt; +} + +static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) +{ + wrb->frag_pa_hi = upper_32_bits(addr); + wrb->frag_pa_lo = addr & 0xFFFFFFFF; + wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; +} + +static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, + bool vlan, u32 wrb_cnt, u32 len) +{ + memset(hdr, 0, sizeof(*hdr)); + + AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); + + if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, + hdr, skb_shinfo(skb)->gso_size); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (is_tcp_pkt(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); + else if (is_udp_pkt(skb)) + AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); + } + + if (vlan && vlan_tx_tag_present(skb)) { + AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, + hdr, vlan_tx_tag_get(skb)); + } + + AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt); + AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); +} + + +static int 
make_tx_wrbs(struct be_adapter *adapter,
+	struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+{
+	u64 busaddr;
+	u32 i, copied = 0;
+	struct pci_dev *pdev = adapter->pdev;
+	struct sk_buff *first_skb = skb;
+	struct be_queue_info *txq = &adapter->tx_obj.q;
+	struct be_eth_wrb *wrb;
+	struct be_eth_hdr_wrb *hdr;
+
+	atomic_add(wrb_cnt, &txq->used);
+	hdr = queue_head_node(txq);
+	queue_head_inc(txq);
+
+	while (skb) {
+		if (skb->len > skb->data_len) {
+			int len = skb->len - skb->data_len;
+			busaddr = pci_map_single(pdev, skb->data, len,
+					PCI_DMA_TODEVICE);
+			wrb = queue_head_node(txq);
+			wrb_fill(wrb, busaddr, len);
+			be_dws_cpu_to_le(wrb, sizeof(*wrb));
+			queue_head_inc(txq);
+			copied += len;
+		}
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			struct skb_frag_struct *frag =
+				&skb_shinfo(skb)->frags[i];
+			busaddr = pci_map_page(pdev, frag->page,
+					frag->page_offset,
+					frag->size, PCI_DMA_TODEVICE);
+			wrb = queue_head_node(txq);
+			wrb_fill(wrb, busaddr, frag->size);
+			be_dws_cpu_to_le(wrb, sizeof(*wrb));
+			queue_head_inc(txq);
+			copied += frag->size;
+		}
+		skb = skb_shinfo(skb)->frag_list;
+	}
+
+	if (dummy_wrb) {
+		wrb = queue_head_node(txq);
+		wrb_fill(wrb, 0, 0);
+		be_dws_cpu_to_le(wrb, sizeof(*wrb));
+		queue_head_inc(txq);
+	}
+
+	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
+		wrb_cnt, copied);
+	be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+	return copied;
+}
+
+static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	struct be_tx_obj *tx_obj = &adapter->tx_obj;
+	struct be_queue_info *txq = &tx_obj->q;
+	u32 wrb_cnt = 0, copied = 0;
+	u32 start = txq->head;
+	bool dummy_wrb, stopped = false;
+
+	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+
+	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
+
+	/* record the sent skb in the sent_skb table */
+	BUG_ON(tx_obj->sent_skb_list[start]);
+	tx_obj->sent_skb_list[start] = skb;
+
+	/* Ensure that txq has space for the next skb; else, stop the queue
+	 * *BEFORE* ringing the tx doorbell, so that we serialize the
+	 * tx compls of the current transmit, which will wake up the queue
+	 */
+	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
+		netif_stop_queue(netdev);
+		stopped = true;
+	}
+
+	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);
+
+	netdev->trans_start = jiffies;
+
+	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+	return NETDEV_TX_OK;
+}
+
+static int be_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+	if (new_mtu < BE_MIN_MTU ||
+			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+		dev_info(&adapter->pdev->dev,
+			"MTU must be between %d and %d bytes\n",
+			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+		return -EINVAL;
+	}
+	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
+			netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * If BE_NUM_VLANS_SUPPORTED or fewer VLANs are configured,
+ * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
+ * set the BE in promiscuous VLAN mode.
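+ *
+ * adapter->vlan_tag[] is a per-VID flag array, so the table handed to
+ * the hardware is rebuilt from scratch on every change; e.g. with only
+ * VIDs 5 and 100 configured, vtag[] becomes { cpu_to_le16(5),
+ * cpu_to_le16(100) } and ntags is 2.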
+ */ +static void be_vids_config(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u16 vtag[BE_NUM_VLANS_SUPPORTED]; + u16 ntags = 0, i; + + if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) { + /* Construct VLAN Table to give to HW */ + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + if (adapter->vlan_tag[i]) { + vtag[ntags] = cpu_to_le16(i); + ntags++; + } + } + be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle, + vtag, ntags, 1, 0); + } else { + be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle, + NULL, 0, 1, 1); + } +} + +static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + struct be_ctrl_info *ctrl = &adapter->ctrl; + + be_eq_notify(ctrl, rx_eq->q.id, false, false, 0); + be_eq_notify(ctrl, tx_eq->q.id, false, false, 0); + adapter->vlan_grp = grp; + be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); + be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); +} + +static void be_vlan_add_vid(struct net_device *netdev, u16 vid) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->num_vlans++; + adapter->vlan_tag[vid] = 1; + + be_vids_config(netdev); +} + +static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->num_vlans--; + adapter->vlan_tag[vid] = 0; + + vlan_group_set_device(adapter->vlan_grp, vid, NULL); + be_vids_config(netdev); +} + +static void be_set_multicast_filter(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct dev_mc_list *mc_ptr; + u8 mac_addr[32][ETH_ALEN]; + int i = 0; + + if (netdev->flags & IFF_ALLMULTI) { + /* set BE in Multicast promiscuous */ + be_cmd_mcast_mac_set(&adapter->ctrl, + adapter->if_handle, NULL, 0, true); + return; + } + + for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN); + if (++i >= 32) { + be_cmd_mcast_mac_set(&adapter->ctrl, + adapter->if_handle, &mac_addr[0][0], i, false); + i = 0; + } + + } + + if (i) { + /* reset the promiscuous mode also. 
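+		 * A non-zero i here is the tail of the most recent batch of
+		 * 32 addresses that has not been flushed yet; this final
+		 * call programs that remainder, and passing "false" as the
+		 * last argument also clears multicast-promiscuous mode.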
*/
+		be_cmd_mcast_mac_set(&adapter->ctrl,
+			adapter->if_handle, &mac_addr[0][0], i, false);
+	}
+}
+
+static void be_set_multicast_list(struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->flags & IFF_PROMISC) {
+		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
+	} else {
+		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
+		be_set_multicast_filter(netdev);
+	}
+}
+
+static void be_rx_rate_update(struct be_adapter *adapter, u32 pktsize,
+			u16 numfrags)
+{
+	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
+	u32 rate;
+
+	stats->be_rx_compl++;
+	stats->be_rx_frags += numfrags;
+	stats->be_rx_bytes += pktsize;
+
+	/* Update the rate once every two seconds */
+	if ((jiffies - stats->be_rx_jiffies) < 2 * HZ)
+		return;
+
+	rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) /
+		((u32) (jiffies - stats->be_rx_jiffies) / HZ);
+	rate = (rate / 1000000);	/* MB/Sec */
+	stats->be_rx_rate = (rate * 8); /* Mega Bits/Sec */
+	stats->be_rx_jiffies = jiffies;
+	stats->be_rx_bytes_prev = stats->be_rx_bytes;
+}
+
+static struct be_rx_page_info *
+get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
+{
+	struct be_rx_page_info *rx_page_info;
+	struct be_queue_info *rxq = &adapter->rx_obj.q;
+
+	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
+	BUG_ON(!rx_page_info->page);
+
+	if (rx_page_info->last_page_user)
+		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+			adapter->big_page_size, PCI_DMA_FROMDEVICE);
+
+	atomic_dec(&rxq->used);
+	return rx_page_info;
+}
+
+/* Throw away the data in the Rx completion */
+static void be_rx_compl_discard(struct be_adapter *adapter,
+			struct be_eth_rx_compl *rxcp)
+{
+	struct be_queue_info *rxq = &adapter->rx_obj.q;
+	struct be_rx_page_info *page_info;
+	u16 rxq_idx, i, num_rcvd;
+
+	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+
+	for (i = 0; i < num_rcvd; i++) {
+		page_info = get_rx_page_info(adapter, rxq_idx);
+		put_page(page_info->page);
+		memset(page_info, 0, sizeof(*page_info));
+		index_inc(&rxq_idx, rxq->len);
+	}
+}
+
+/*
+ * skb_fill_rx_data forms a complete skb for an ether frame
+ * indicated by rxcp.
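+ *
+ * Only up to BE_HDR_LEN bytes are copied into the skb's linear data
+ * area; any remaining receive fragments are attached as page frags,
+ * so large frames are assembled without copying their payload.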
+ */ +static void skb_fill_rx_data(struct be_adapter *adapter, + struct sk_buff *skb, struct be_eth_rx_compl *rxcp) +{ + struct be_queue_info *rxq = &adapter->rx_obj.q; + struct be_rx_page_info *page_info; + u16 rxq_idx, i, num_rcvd; + u32 pktsize, hdr_len, curr_frag_len; + u8 *start; + + rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); + pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); + num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); + + page_info = get_rx_page_info(adapter, rxq_idx); + + start = page_address(page_info->page) + page_info->page_offset; + prefetch(start); + + /* Copy data in the first descriptor of this completion */ + curr_frag_len = min(pktsize, rx_frag_size); + + /* Copy the header portion into skb_data */ + hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); + memcpy(skb->data, start, hdr_len); + skb->len = curr_frag_len; + if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ + /* Complete packet has now been moved to data */ + put_page(page_info->page); + skb->data_len = 0; + skb->tail += curr_frag_len; + } else { + skb_shinfo(skb)->nr_frags = 1; + skb_shinfo(skb)->frags[0].page = page_info->page; + skb_shinfo(skb)->frags[0].page_offset = + page_info->page_offset + hdr_len; + skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len; + skb->data_len = curr_frag_len - hdr_len; + skb->tail += hdr_len; + } + memset(page_info, 0, sizeof(*page_info)); + + if (pktsize <= rx_frag_size) { + BUG_ON(num_rcvd != 1); + return; + } + + /* More frags present for this completion */ + pktsize -= curr_frag_len; /* account for above copied frag */ + for (i = 1; i < num_rcvd; i++) { + index_inc(&rxq_idx, rxq->len); + page_info = get_rx_page_info(adapter, rxq_idx); + + curr_frag_len = min(pktsize, rx_frag_size); + + skb_shinfo(skb)->frags[i].page = page_info->page; + skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset; + skb_shinfo(skb)->frags[i].size = curr_frag_len; + skb->len += curr_frag_len; + skb->data_len += curr_frag_len; + skb_shinfo(skb)->nr_frags++; + pktsize -= curr_frag_len; + + memset(page_info, 0, sizeof(*page_info)); + } + + be_rx_rate_update(adapter, pktsize, num_rcvd); + return; +} + +/* Process the RX completion indicated by rxcp when LRO is disabled */ +static void be_rx_compl_process(struct be_adapter *adapter, + struct be_eth_rx_compl *rxcp) +{ + struct sk_buff *skb; + u32 vtp, vid; + int l4_cksm; + + l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); + vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); + + skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); + if (!skb) { + if (net_ratelimit()) + dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); + be_rx_compl_discard(adapter, rxcp); + return; + } + + skb_reserve(skb, NET_IP_ALIGN); + + skb_fill_rx_data(adapter, skb, rxcp); + + if (l4_cksm && adapter->rx_csum) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + skb->truesize = skb->len + sizeof(struct sk_buff); + skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->dev = adapter->netdev; + + if (vtp) { + if (!adapter->vlan_grp || adapter->num_vlans == 0) { + kfree_skb(skb); + return; + } + vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); + vid = be16_to_cpu(vid); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); + } else { + netif_receive_skb(skb); + } + + adapter->netdev->last_rx = jiffies; + + return; +} + +/* Process the RX completion indicated by rxcp when LRO is enabled */ +static void 
be_rx_compl_process_lro(struct be_adapter *adapter, + struct be_eth_rx_compl *rxcp) +{ + struct be_rx_page_info *page_info; + struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME]; + struct be_queue_info *rxq = &adapter->rx_obj.q; + u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; + u16 i, rxq_idx = 0, vid; + + num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); + pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); + vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); + rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); + + remaining = pkt_size; + for (i = 0; i < num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxq_idx); + + curr_frag_len = min(remaining, rx_frag_size); + + rx_frags[i].page = page_info->page; + rx_frags[i].page_offset = page_info->page_offset; + rx_frags[i].size = curr_frag_len; + remaining -= curr_frag_len; + + index_inc(&rxq_idx, rxq->len); + + memset(page_info, 0, sizeof(*page_info)); + } + + if (likely(!vlanf)) { + lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size, + pkt_size, NULL, 0); + } else { + vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); + vid = be16_to_cpu(vid); + + if (!adapter->vlan_grp || adapter->num_vlans == 0) + return; + + lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr, + rx_frags, pkt_size, pkt_size, adapter->vlan_grp, + vid, NULL, 0); + } + + be_rx_rate_update(adapter, pkt_size, num_rcvd); + return; +} + +static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) +{ + struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); + + if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) + return NULL; + + be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); + + rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; + + queue_tail_inc(&adapter->rx_obj.cq); + return rxcp; +} + +static inline struct page *be_alloc_pages(u32 size) +{ + gfp_t alloc_flags = GFP_ATOMIC; + u32 order = get_order(size); + if (order > 0) + alloc_flags |= __GFP_COMP; + return alloc_pages(alloc_flags, order); +} + +/* + * Allocate a page, split it to fragments of size rx_frag_size and post as + * receive buffers to BE + */ +static void be_post_rx_frags(struct be_adapter *adapter) +{ + struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; + struct be_rx_page_info *page_info = NULL; + struct be_queue_info *rxq = &adapter->rx_obj.q; + struct page *pagep = NULL; + struct be_eth_rx_d *rxd; + u64 page_dmaaddr = 0, frag_dmaaddr; + u32 posted, page_offset = 0; + + + page_info = &page_info_tbl[rxq->head]; + for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { + if (!pagep) { + pagep = be_alloc_pages(adapter->big_page_size); + if (unlikely(!pagep)) { + drvr_stats(adapter)->be_ethrx_post_fail++; + break; + } + page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, + adapter->big_page_size, + PCI_DMA_FROMDEVICE); + page_info->page_offset = 0; + } else { + get_page(pagep); + page_info->page_offset = page_offset + rx_frag_size; + } + page_offset = page_info->page_offset; + page_info->page = pagep; + pci_unmap_addr_set(page_info, bus, page_dmaaddr); + frag_dmaaddr = page_dmaaddr + page_info->page_offset; + + rxd = queue_head_node(rxq); + rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); + rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); + queue_head_inc(rxq); + + /* Any space left in the current big page for another frag? 
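+		 * E.g. with rx_frag_size 2048 and 4K pages, big_page_size is
+		 * 4096 and each page is carved into two fragments; once the
+		 * next fragment would not fit, the page is retired and its
+		 * last user marked so the DMA mapping is unmapped only once.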
*/ + if ((page_offset + rx_frag_size + rx_frag_size) > + adapter->big_page_size) { + pagep = NULL; + page_info->last_page_user = true; + } + page_info = &page_info_tbl[rxq->head]; + } + if (pagep) + page_info->last_page_user = true; + + if (posted) { + be_rxq_notify(&adapter->ctrl, rxq->id, posted); + atomic_add(posted, &rxq->used); + } + + return; +} + +static struct be_eth_tx_compl * +be_tx_compl_get(struct be_adapter *adapter) +{ + struct be_queue_info *tx_cq = &adapter->tx_obj.cq; + struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); + + if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + return NULL; + + be_dws_le_to_cpu(txcp, sizeof(*txcp)); + + txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; + + queue_tail_inc(tx_cq); + return txcp; +} + +static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) +{ + struct be_queue_info *txq = &adapter->tx_obj.q; + struct be_eth_wrb *wrb; + struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; + struct sk_buff *sent_skb; + u64 busaddr; + u16 cur_index, num_wrbs = 0; + + cur_index = txq->tail; + sent_skb = sent_skbs[cur_index]; + BUG_ON(!sent_skb); + sent_skbs[cur_index] = NULL; + + do { + cur_index = txq->tail; + wrb = queue_tail_node(txq); + be_dws_le_to_cpu(wrb, sizeof(*wrb)); + busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; + if (busaddr != 0) { + pci_unmap_single(adapter->pdev, busaddr, + wrb->frag_len, PCI_DMA_TODEVICE); + } + num_wrbs++; + queue_tail_inc(txq); + } while (cur_index != last_index); + + atomic_sub(num_wrbs, &txq->used); + + kfree_skb(sent_skb); +} + +static void be_rx_q_clean(struct be_adapter *adapter) +{ + struct be_rx_page_info *page_info; + struct be_queue_info *rxq = &adapter->rx_obj.q; + struct be_queue_info *rx_cq = &adapter->rx_obj.cq; + struct be_eth_rx_compl *rxcp; + u16 tail; + + /* First cleanup pending rx completions */ + while ((rxcp = be_rx_compl_get(adapter)) != NULL) { + be_rx_compl_discard(adapter, rxcp); + be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1); + } + + /* Then free posted rx buffer that were not used */ + tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; + for (; tail != rxq->head; index_inc(&tail, rxq->len)) { + page_info = get_rx_page_info(adapter, tail); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); +} + +static void be_tx_q_clean(struct be_adapter *adapter) +{ + struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; + struct sk_buff *sent_skb; + struct be_queue_info *txq = &adapter->tx_obj.q; + u16 last_index; + bool dummy_wrb; + + while (atomic_read(&txq->used)) { + sent_skb = sent_skbs[txq->tail]; + last_index = txq->tail; + index_adv(&last_index, + wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); + be_tx_compl_process(adapter, last_index); + } +} + +static void be_tx_queues_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + + q = &adapter->tx_obj.q; + if (q->created) + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ); + be_queue_free(adapter, q); + + q = &adapter->tx_obj.cq; + if (q->created) + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ); + be_queue_free(adapter, q); + + /* No more tx completions can be rcvd now; clean up if there are + * any pending completions or pending tx requests */ + be_tx_q_clean(adapter); + + q = &adapter->tx_eq.q; + if (q->created) + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ); + be_queue_free(adapter, q); +} + +static int be_tx_queues_create(struct be_adapter *adapter) +{ + struct 
be_queue_info *eq, *q, *cq; + + adapter->tx_eq.max_eqd = 0; + adapter->tx_eq.min_eqd = 0; + adapter->tx_eq.cur_eqd = 96; + adapter->tx_eq.enable_aic = false; + /* Alloc Tx Event queue */ + eq = &adapter->tx_eq.q; + if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry))) + return -1; + + /* Ask BE to create Tx Event queue */ + if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd)) + goto tx_eq_free; + /* Alloc TX eth compl queue */ + cq = &adapter->tx_obj.cq; + if (be_queue_alloc(adapter, cq, TX_CQ_LEN, + sizeof(struct be_eth_tx_compl))) + goto tx_eq_destroy; + + /* Ask BE to create Tx eth compl queue */ + if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3)) + goto tx_cq_free; + + /* Alloc TX eth queue */ + q = &adapter->tx_obj.q; + if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) + goto tx_cq_destroy; + + /* Ask BE to create Tx eth queue */ + if (be_cmd_txq_create(&adapter->ctrl, q, cq)) + goto tx_q_free; + return 0; + +tx_q_free: + be_queue_free(adapter, q); +tx_cq_destroy: + be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ); +tx_cq_free: + be_queue_free(adapter, cq); +tx_eq_destroy: + be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ); +tx_eq_free: + be_queue_free(adapter, eq); + return -1; +} + +static void be_rx_queues_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + + q = &adapter->rx_obj.q; + if (q->created) { + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ); + be_rx_q_clean(adapter); + } + be_queue_free(adapter, q); + + q = &adapter->rx_obj.cq; + if (q->created) + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ); + be_queue_free(adapter, q); + + q = &adapter->rx_eq.q; + if (q->created) + be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ); + be_queue_free(adapter, q); +} + +static int be_rx_queues_create(struct be_adapter *adapter) +{ + struct be_queue_info *eq, *q, *cq; + int rc; + + adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME; + adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; + adapter->rx_eq.max_eqd = BE_MAX_EQD; + adapter->rx_eq.min_eqd = 0; + adapter->rx_eq.cur_eqd = 0; + adapter->rx_eq.enable_aic = true; + + /* Alloc Rx Event queue */ + eq = &adapter->rx_eq.q; + rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, + sizeof(struct be_eq_entry)); + if (rc) + return rc; + + /* Ask BE to create Rx Event queue */ + rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd); + if (rc) + goto rx_eq_free; + + /* Alloc RX eth compl queue */ + cq = &adapter->rx_obj.cq; + rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, + sizeof(struct be_eth_rx_compl)); + if (rc) + goto rx_eq_destroy; + + /* Ask BE to create Rx eth compl queue */ + rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3); + if (rc) + goto rx_cq_free; + + /* Alloc RX eth queue */ + q = &adapter->rx_obj.q; + rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); + if (rc) + goto rx_cq_destroy; + + /* Ask BE to create Rx eth queue */ + rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size, + BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); + if (rc) + goto rx_q_free; + + return 0; +rx_q_free: + be_queue_free(adapter, q); +rx_cq_destroy: + be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ); +rx_cq_free: + be_queue_free(adapter, cq); +rx_eq_destroy: + be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ); +rx_eq_free: + be_queue_free(adapter, eq); + return rc; +} +static bool event_get(struct be_eq_obj *eq_obj, u16 *rid) +{ + struct be_eq_entry *entry = queue_tail_node(&eq_obj->q); + u32 evt = entry->evt; + + if (!evt) + 
return false; + + evt = le32_to_cpu(evt); + *rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK; + entry->evt = 0; + queue_tail_inc(&eq_obj->q); + return true; +} + +static int event_handle(struct be_ctrl_info *ctrl, + struct be_eq_obj *eq_obj) +{ + u16 rid = 0, num = 0; + + while (event_get(eq_obj, &rid)) + num++; + + /* We can see an interrupt and no event */ + be_eq_notify(ctrl, eq_obj->q.id, true, true, num); + if (num) + napi_schedule(&eq_obj->napi); + + return num; +} + +static irqreturn_t be_intx(int irq, void *dev) +{ + struct be_adapter *adapter = dev; + struct be_ctrl_info *ctrl = &adapter->ctrl; + int rx, tx; + + tx = event_handle(ctrl, &adapter->tx_eq); + rx = event_handle(ctrl, &adapter->rx_eq); + + if (rx || tx) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +static irqreturn_t be_msix_rx(int irq, void *dev) +{ + struct be_adapter *adapter = dev; + + event_handle(&adapter->ctrl, &adapter->rx_eq); + + return IRQ_HANDLED; +} + +static irqreturn_t be_msix_tx(int irq, void *dev) +{ + struct be_adapter *adapter = dev; + + event_handle(&adapter->ctrl, &adapter->tx_eq); + + return IRQ_HANDLED; +} + +static inline bool do_lro(struct be_adapter *adapter, + struct be_eth_rx_compl *rxcp) +{ + int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); + int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); + + if (err) + drvr_stats(adapter)->be_rxcp_err++; + + return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ? + false : true; +} + +int be_poll_rx(struct napi_struct *napi, int budget) +{ + struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = + container_of(rx_eq, struct be_adapter, rx_eq); + struct be_queue_info *rx_cq = &adapter->rx_obj.cq; + struct be_eth_rx_compl *rxcp; + u32 work_done; + + for (work_done = 0; work_done < budget; work_done++) { + rxcp = be_rx_compl_get(adapter); + if (!rxcp) + break; + + if (do_lro(adapter, rxcp)) + be_rx_compl_process_lro(adapter, rxcp); + else + be_rx_compl_process(adapter, rxcp); + } + + lro_flush_all(&adapter->rx_obj.lro_mgr); + + /* Refill the queue */ + if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM) + be_post_rx_frags(adapter); + + /* All consumed */ + if (work_done < budget) { + napi_complete(napi); + be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done); + } else { + /* More to be consumed; continue with interrupts disabled */ + be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done); + } + return work_done; +} + +/* For TX we don't honour budget; consume everything */ +int be_poll_tx(struct napi_struct *napi, int budget) +{ + struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = + container_of(tx_eq, struct be_adapter, tx_eq); + struct be_tx_obj *tx_obj = &adapter->tx_obj; + struct be_queue_info *tx_cq = &tx_obj->cq; + struct be_queue_info *txq = &tx_obj->q; + struct be_eth_tx_compl *txcp; + u32 num_cmpl = 0; + u16 end_idx; + + while ((txcp = be_tx_compl_get(adapter))) { + end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, + wrb_index, txcp); + be_tx_compl_process(adapter, end_idx); + num_cmpl++; + } + + /* As Tx wrbs have been freed up, wake up netdev queue if + * it was stopped due to lack of tx wrbs. 
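+	 * The queue was stopped in be_xmit() once fewer than
+	 * BE_MAX_TX_FRAG_COUNT entries were free; waking it only when the
+	 * ring is at most half full adds hysteresis, so the queue does not
+	 * flap between the stopped and awake states under load.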
+ */ + if (netif_queue_stopped(adapter->netdev) && + atomic_read(&txq->used) < txq->len / 2) { + netif_wake_queue(adapter->netdev); + } + + napi_complete(napi); + + be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl); + + drvr_stats(adapter)->be_tx_events++; + drvr_stats(adapter)->be_tx_compl += num_cmpl; + + return 1; +} + +static void be_msix_enable(struct be_adapter *adapter) +{ + int i, status; + + for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) + adapter->msix_entries[i].entry = i; + + status = pci_enable_msix(adapter->pdev, adapter->msix_entries, + BE_NUM_MSIX_VECTORS); + if (status == 0) + adapter->msix_enabled = true; + return; +} + +static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) +{ + return adapter->msix_entries[eq_id - + 8 * adapter->ctrl.pci_func].vector; +} + +static int be_msix_register(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + struct be_eq_obj *rx_eq = &adapter->rx_eq; + int status, vec; + + sprintf(tx_eq->desc, "%s-tx", netdev->name); + vec = be_msix_vec_get(adapter, tx_eq->q.id); + status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter); + if (status) + goto err; + + sprintf(rx_eq->desc, "%s-rx", netdev->name); + vec = be_msix_vec_get(adapter, rx_eq->q.id); + status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter); + if (status) { /* Free TX IRQ */ + vec = be_msix_vec_get(adapter, tx_eq->q.id); + free_irq(vec, adapter); + goto err; + } + return 0; +err: + dev_warn(&adapter->pdev->dev, + "MSIX Request IRQ failed - err %d\n", status); + pci_disable_msix(adapter->pdev); + adapter->msix_enabled = false; + return status; +} + +static int be_irq_register(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + if (adapter->msix_enabled) { + status = be_msix_register(adapter); + if (status == 0) + goto done; + } + + /* INTx */ + netdev->irq = adapter->pdev->irq; + status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name, + adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "INTx request IRQ failed - err %d\n", status); + return status; + } +done: + adapter->isr_registered = true; + return 0; +} + +static void be_irq_unregister(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vec; + + if (!adapter->isr_registered) + return; + + /* INTx */ + if (!adapter->msix_enabled) { + free_irq(netdev->irq, adapter); + goto done; + } + + /* MSIx */ + vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id); + free_irq(vec, adapter); + vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id); + free_irq(vec, adapter); +done: + adapter->isr_registered = false; + return; +} + +static int be_open(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_ctrl_info *ctrl = &adapter->ctrl; + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + u32 if_flags; + int status; + + if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | + BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_PASS_L3L4_ERRORS; + status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr, + false/* pmac_invalid */, &adapter->if_handle, + &adapter->pmac_id); + if (status != 0) + goto do_none; + + status = be_cmd_set_flow_control(ctrl, true, true); + if (status != 0) + goto if_destroy; + + status = be_tx_queues_create(adapter); + if (status != 0) + goto if_destroy; + + status = be_rx_queues_create(adapter); + if (status != 0) + goto 
tx_qs_destroy; + + /* First time posting */ + be_post_rx_frags(adapter); + + napi_enable(&rx_eq->napi); + napi_enable(&tx_eq->napi); + + be_irq_register(adapter); + + be_intr_set(ctrl, true); + + /* The evt queues are created in the unarmed state; arm them */ + be_eq_notify(ctrl, rx_eq->q.id, true, false, 0); + be_eq_notify(ctrl, tx_eq->q.id, true, false, 0); + + /* The compl queues are created in the unarmed state; arm them */ + be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0); + be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0); + + be_link_status_update(adapter); + + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); + return 0; + +tx_qs_destroy: + be_tx_queues_destroy(adapter); +if_destroy: + be_cmd_if_destroy(ctrl, adapter->if_handle); +do_none: + return status; +} + +static int be_close(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_ctrl_info *ctrl = &adapter->ctrl; + struct be_eq_obj *rx_eq = &adapter->rx_eq; + struct be_eq_obj *tx_eq = &adapter->tx_eq; + int vec; + + cancel_delayed_work(&adapter->work); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + adapter->link.speed = PHY_LINK_SPEED_ZERO; + + be_intr_set(ctrl, false); + + if (adapter->msix_enabled) { + vec = be_msix_vec_get(adapter, tx_eq->q.id); + synchronize_irq(vec); + vec = be_msix_vec_get(adapter, rx_eq->q.id); + synchronize_irq(vec); + } else { + synchronize_irq(netdev->irq); + } + be_irq_unregister(adapter); + + napi_disable(&rx_eq->napi); + napi_disable(&tx_eq->napi); + + be_rx_queues_destroy(adapter); + be_tx_queues_destroy(adapter); + + be_cmd_if_destroy(ctrl, adapter->if_handle); + return 0; +} + +static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, + u64 *hdr_flags, void *priv) +{ + struct ethhdr *eh; + struct vlan_ethhdr *veh; + struct iphdr *iph; + u8 *va = page_address(frag->page) + frag->page_offset; + unsigned long ll_hlen; + + prefetch(va); + eh = (struct ethhdr *)va; + *mac_hdr = eh; + ll_hlen = ETH_HLEN; + if (eh->h_proto != htons(ETH_P_IP)) { + if (eh->h_proto == htons(ETH_P_8021Q)) { + veh = (struct vlan_ethhdr *)va; + if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -1; + + ll_hlen += VLAN_HLEN; + } else { + return -1; + } + } + *hdr_flags = LRO_IPV4; + iph = (struct iphdr *)(va + ll_hlen); + *ip_hdr = iph; + if (iph->protocol != IPPROTO_TCP) + return -1; + *hdr_flags |= LRO_TCP; + *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); + + return 0; +} + +static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev) +{ + struct net_lro_mgr *lro_mgr; + + lro_mgr = &adapter->rx_obj.lro_mgr; + lro_mgr->dev = netdev; + lro_mgr->features = LRO_F_NAPI; + lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; + lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS; + lro_mgr->lro_arr = adapter->rx_obj.lro_desc; + lro_mgr->get_frag_header = be_get_frag_header; + lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME; +} + +static struct net_device_ops be_netdev_ops = { + .ndo_open = be_open, + .ndo_stop = be_close, + .ndo_start_xmit = be_xmit, + .ndo_get_stats = be_get_stats, + .ndo_set_rx_mode = be_set_multicast_list, + .ndo_set_mac_address = be_mac_addr_set, + .ndo_change_mtu = be_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_register = be_vlan_register, + .ndo_vlan_rx_add_vid = be_vlan_add_vid, + .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, +}; + +static void be_netdev_init(struct net_device *netdev) +{ + struct 
be_adapter *adapter = netdev_priv(netdev); + + netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + netdev->flags |= IFF_MULTICAST; + + BE_SET_NETDEV_OPS(netdev, &be_netdev_ops); + + SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); + + be_lro_init(adapter, netdev); + + netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, + BE_NAPI_WEIGHT); + netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, + BE_NAPI_WEIGHT); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); +} + +static void be_unmap_pci_bars(struct be_adapter *adapter) +{ + struct be_ctrl_info *ctrl = &adapter->ctrl; + if (ctrl->csr) + iounmap(ctrl->csr); + if (ctrl->db) + iounmap(ctrl->db); + if (ctrl->pcicfg) + iounmap(ctrl->pcicfg); +} + +static int be_map_pci_bars(struct be_adapter *adapter) +{ + u8 __iomem *addr; + + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), + pci_resource_len(adapter->pdev, 2)); + if (addr == NULL) + return -ENOMEM; + adapter->ctrl.csr = addr; + + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), + 128 * 1024); + if (addr == NULL) + goto pci_map_err; + adapter->ctrl.db = addr; + + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), + pci_resource_len(adapter->pdev, 1)); + if (addr == NULL) + goto pci_map_err; + adapter->ctrl.pcicfg = addr; + + return 0; +pci_map_err: + be_unmap_pci_bars(adapter); + return -ENOMEM; +} + + +static void be_ctrl_cleanup(struct be_adapter *adapter) +{ + struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced; + + be_unmap_pci_bars(adapter); + + if (mem->va) + pci_free_consistent(adapter->pdev, mem->size, + mem->va, mem->dma); +} + +/* Initialize the mbox required to send cmds to BE */ +static int be_ctrl_init(struct be_adapter *adapter) +{ + struct be_ctrl_info *ctrl = &adapter->ctrl; + struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; + struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; + int status; + u32 val; + + status = be_map_pci_bars(adapter); + if (status) + return status; + + mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; + mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, + mbox_mem_alloc->size, &mbox_mem_alloc->dma); + if (!mbox_mem_alloc->va) { + be_unmap_pci_bars(adapter); + return -1; + } + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); + mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); + mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); + memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); + spin_lock_init(&ctrl->cmd_lock); + + val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); + ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & + MEMBAR_CTRL_INT_CTRL_PFUNC_MASK; + return 0; +} + +static void be_stats_cleanup(struct be_adapter *adapter) +{ + struct be_stats_obj *stats = &adapter->stats; + struct be_dma_mem *cmd = &stats->cmd; + + if (cmd->va) + pci_free_consistent(adapter->pdev, cmd->size, + cmd->va, cmd->dma); +} + +static int be_stats_init(struct be_adapter *adapter) +{ + struct be_stats_obj *stats = &adapter->stats; + struct be_dma_mem *cmd = &stats->cmd; + + cmd->size = sizeof(struct be_cmd_req_get_stats); + cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); + if (cmd->va == NULL) + return -1; + return 0; +} + +static void __devexit be_remove(struct pci_dev *pdev) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + unregister_netdev(adapter->netdev); + + 
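+	/* Tear down in the reverse order of be_probe(): stats DMA memory,
+	 * then the mailbox and BAR mappings, then MSI-X, the PCI
+	 * resources, and finally the netdev itself. */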
be_stats_cleanup(adapter); + + be_ctrl_cleanup(adapter); + + if (adapter->msix_enabled) { + pci_disable_msix(adapter->pdev); + adapter->msix_enabled = false; + } + + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + pci_disable_device(pdev); + + free_netdev(adapter->netdev); +} + +static int be_hw_up(struct be_adapter *adapter) +{ + struct be_ctrl_info *ctrl = &adapter->ctrl; + int status; + + status = be_cmd_POST(ctrl); + if (status) + return status; + + status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver); + if (status) + return status; + + status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num); + return status; +} + +static int __devinit be_probe(struct pci_dev *pdev, + const struct pci_device_id *pdev_id) +{ + int status = 0; + struct be_adapter *adapter; + struct net_device *netdev; + struct be_ctrl_info *ctrl; + u8 mac[ETH_ALEN]; + + status = pci_enable_device(pdev); + if (status) + goto do_none; + + status = pci_request_regions(pdev, DRV_NAME); + if (status) + goto disable_dev; + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct be_adapter)); + if (netdev == NULL) { + status = -ENOMEM; + goto rel_reg; + } + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + pci_set_drvdata(pdev, adapter); + adapter->netdev = netdev; + + be_msix_enable(adapter); + + status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!status) { + netdev->features |= NETIF_F_HIGHDMA; + } else { + status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (status) { + dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); + goto free_netdev; + } + } + + ctrl = &adapter->ctrl; + status = be_ctrl_init(adapter); + if (status) + goto free_netdev; + + status = be_stats_init(adapter); + if (status) + goto ctrl_clean; + + status = be_hw_up(adapter); + if (status) + goto stats_clean; + + status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK, + true /* permanent */, 0); + if (status) + goto stats_clean; + memcpy(netdev->dev_addr, mac, ETH_ALEN); + + INIT_DELAYED_WORK(&adapter->work, be_worker); + be_netdev_init(netdev); + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + status = register_netdev(netdev); + if (status != 0) + goto stats_clean; + + dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num); + return 0; + +stats_clean: + be_stats_cleanup(adapter); +ctrl_clean: + be_ctrl_cleanup(adapter); +free_netdev: + free_netdev(adapter->netdev); +rel_reg: + pci_release_regions(pdev); +disable_dev: + pci_disable_device(pdev); +do_none: + dev_warn(&pdev->dev, BE_NAME " initialization failed\n"); + return status; +} + +static int be_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + if (netif_running(netdev)) { + rtnl_lock(); + be_close(netdev); + rtnl_unlock(); + } + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int be_resume(struct pci_dev *pdev) +{ + int status = 0; + struct be_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + status = pci_enable_device(pdev); + if (status) + return status; + + pci_set_power_state(pdev, 0); + pci_restore_state(pdev); + + be_vids_config(netdev); + + if (netif_running(netdev)) { + rtnl_lock(); + be_open(netdev); + rtnl_unlock(); + } + netif_device_attach(netdev); + return 0; +} + +static struct pci_driver be_driver = { + .name = DRV_NAME, + .id_table = 
be_dev_ids,
+	.probe = be_probe,
+	.remove = be_remove,
+	.suspend = be_suspend,
+	.resume = be_resume
+};
+
+static int __init be_init_module(void)
+{
+	if (rx_frag_size != 8192 && rx_frag_size != 4096
+		&& rx_frag_size != 2048) {
+		printk(KERN_WARNING DRV_NAME
+			" : Module param rx_frag_size must be 2048/4096/8192."
+			" Using 2048\n");
+		rx_frag_size = 2048;
+	}
+	/* Ensure rx_frag_size is aligned to a cache line */
+	if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
+		printk(KERN_WARNING DRV_NAME
+			" : Bad module param rx_frag_size. Using 2048\n");
+		rx_frag_size = 2048;
+	}
+
+	return pci_register_driver(&be_driver);
+}
+module_init(be_init_module);
+
+static void __exit be_exit_module(void)
+{
+	pci_unregister_driver(&be_driver);
+}
+module_exit(be_exit_module);
-- cgit v1.2.3-70-g09d2

From 9fae6c3f648e38f023b99b5f5a5280907b2e796e Mon Sep 17 00:00:00 2001
From: Ilya Yanok
Date: Fri, 13 Mar 2009 09:51:46 -0700
Subject: dnet: replace obsolete *netif_rx_* functions with *napi_*

The *netif_rx_* functions are obsolete and have been removed in newer
kernels, so use the corresponding *napi_* functions instead.

Signed-off-by: Ilya Yanok
Signed-off-by: David S. Miller
---
 drivers/net/dnet.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 4b96974fe76..5c347f70cb6 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -408,7 +408,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
 	 * packets waiting */
 	if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		int_enable = dnet_readl(bp, INTR_ENB);
 		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
 		dnet_writel(bp, int_enable, INTR_ENB);
@@ -447,7 +447,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
 	if (npackets < budget) {
 		/* We processed all packets available. Tell NAPI it can
 		 * stop polling then re-enable rx interrupts */
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		int_enable = dnet_readl(bp, INTR_ENB);
 		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
 		dnet_writel(bp, int_enable, INTR_ENB);
@@ -507,7 +507,7 @@ static irqreturn_t dnet_interrupt(int irq, void *dev_id)
 	}
 
 	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
-		if (netif_rx_schedule_prep(&bp->napi)) {
+		if (napi_schedule_prep(&bp->napi)) {
 			/*
 			 * There's no point taking any more interrupts
 			 * until we have processed the buffers
@@ -516,7 +516,7 @@ static irqreturn_t dnet_interrupt(int irq, void *dev_id)
 			int_enable = dnet_readl(bp, INTR_ENB);
 			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
 			dnet_writel(bp, int_enable, INTR_ENB);
-			__netif_rx_schedule(&bp->napi);
+			__napi_schedule(&bp->napi);
 		}
 		handled = 1;
 	}
-- cgit v1.2.3-70-g09d2

From c3c6496dc3d94d87bb0da86cf0bf48764577bf77 Mon Sep 17 00:00:00 2001
From: Ron Mercer
Date: Wed, 11 Mar 2009 11:55:40 +0000
Subject: qlge: bugfix: Increase filter on inbound csum.

The chip does not compute the UDP checksum when fragmentation occurs,
so only trust hardware checksum results for TCP frames and for
unfragmented IPv4 UDP frames.

Signed-off-by: Ron Mercer
Signed-off-by: David S.
Miller --- drivers/net/qlge/qlge.h | 1 + drivers/net/qlge/qlge_main.c | 38 ++++++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index e6fdce9206c..aff9c5fec73 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -927,6 +927,7 @@ struct ib_mac_iocb_rsp { u8 flags1; #define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ #define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ +#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ #define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ #define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ #define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 8ea72dc60f7..87787b1ecab 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -1436,18 +1436,32 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev, if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); } - if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) { - QPRINTK(qdev, RX_STATUS, ERR, - "Bad checksum for this %s packet.\n", - ((ib_mac_rsp-> - flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP")); - skb->ip_summed = CHECKSUM_NONE; - } else if (qdev->rx_csum && - ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) || - ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) { - QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb->protocol = eth_type_trans(skb, ndev); + skb->ip_summed = CHECKSUM_NONE; + + /* If rx checksum is on, and there are no + * csum or frame errors. + */ + if (qdev->rx_csum && + !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { + QPRINTK(qdev, RX_STATUS, DEBUG, + "TCP checksum done!\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { + /* Unfragmented ipv4 UDP frame. */ + struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & + cpu_to_be16(IP_MF|IP_OFFSET))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + QPRINTK(qdev, RX_STATUS, DEBUG, + "TCP checksum done!\n"); + } + } } qdev->stats.rx_packets++; qdev->stats.rx_bytes += skb->len; -- cgit v1.2.3-70-g09d2 From a7a655f22c75f48e0afe8b86be03ecd70bd68b07 Mon Sep 17 00:00:00 2001 From: Ron Mercer Date: Wed, 11 Mar 2009 11:55:41 +0000 Subject: qlge: bugfix: Tell hw to strip vlan header. Signed-off-by: Ron Mercer Signed-off-by: David S. Miller --- drivers/net/qlge/qlge_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 87787b1ecab..54d54ea03d4 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -2984,9 +2984,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) mask = value << 16; ql_write32(qdev, SYS, mask | value); - /* Set the default queue. */ - value = NIC_RCV_CFG_DFQ; - mask = NIC_RCV_CFG_DFQ_MASK; + /* Set the default queue, and VLAN behavior. */ + value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; + mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); ql_write32(qdev, NIC_RCV_CFG, (mask | value)); /* Set the MPI interrupt to enabled. 
*/ -- cgit v1.2.3-70-g09d2 From 6612a6344aba8ba7b5af67cd006453bfedbb2967 Mon Sep 17 00:00:00 2001 From: Ron Mercer Date: Wed, 11 Mar 2009 11:55:42 +0000 Subject: qlge: bugfix: Move netif_napi_del() to common call point. Moving netif_napi_del() up the call chain so it will get called from all exit points. Signed-off-by: Ron Mercer Signed-off-by: David S. Miller --- drivers/net/qlge/qlge_main.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 54d54ea03d4..00c37c0f8a9 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -3163,6 +3163,11 @@ static int ql_adapter_down(struct ql_adapter *qdev) ql_tx_ring_clean(qdev); + /* Call netif_napi_del() from common point. + */ + for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + spin_lock(&qdev->hw_lock); status = ql_adapter_reset(qdev); if (status) @@ -3867,7 +3872,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); - int err, i; + int err; netif_device_detach(ndev); @@ -3877,9 +3882,6 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) return err; } - for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - err = pci_save_state(pdev); if (err) return err; -- cgit v1.2.3-70-g09d2 From 855b0993f216a9b0f9cb33573bd05e314105d86c Mon Sep 17 00:00:00 2001 From: Ron Mercer Date: Wed, 11 Mar 2009 11:55:43 +0000 Subject: qlge: bugfix: Pad outbound frames smaller than 60 bytes. With some asic configurations xmit of frames smaller than 60 bytes may fail. Signed-off-by: Ron Mercer Signed-off-by: David S. Miller --- drivers/net/qlge/qlge_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 00c37c0f8a9..91191f761fb 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -1941,6 +1941,9 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev) tx_ring = &qdev->tx_ring[tx_ring_idx]; + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { QPRINTK(qdev, TX_QUEUED, INFO, "%s: shutting down tx queue %d du to lack of resources.\n", -- cgit v1.2.3-70-g09d2 From 59f8e169e25c5fce91826412c38359ecaf940b82 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Fri, 13 Mar 2009 13:37:46 -0700 Subject: via-velocity: Fix DMA mapping length errors on transmit. From: Dave Jones The dma-debug changes caught that this driver uses the wrong DMA mapping length when skb_padto() does something. With suggestions from Eric Dumazet. Signed-off-by: David S. Miller --- drivers/net/via-velocity.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index c5691fdb707..fb53ef872df 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1838,17 +1838,19 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ { struct sk_buff *skb = tdinfo->skb; int i; + int pktlen; /* * Don't unmap the pre-allocated tx_bufs */ if (tdinfo->skb_dma) { + pktlen = (skb->len > ETH_ZLEN ? 
: ETH_ZLEN); for (i = 0; i < tdinfo->nskb_dma; i++) { #ifdef VELOCITY_ZERO_COPY_SUPPORT pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); #else - pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE); #endif tdinfo->skb_dma[i] = 0; } @@ -2080,17 +2082,14 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) struct tx_desc *td_ptr; struct velocity_td_info *tdinfo; unsigned long flags; - int pktlen = skb->len; + int pktlen; __le16 len; int index; - - if (skb->len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) - goto out; - pktlen = ETH_ZLEN; - } + if (skb_padto(skb, ETH_ZLEN)) + goto out; + pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); len = cpu_to_le16(pktlen); -- cgit v1.2.3-70-g09d2 From e90d400c2b65c7bf038d3646780f4a81f602cd19 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Tue, 10 Mar 2009 16:00:24 +0000 Subject: ixgbe: fix multiple unicast address support Multiple unicast address support appears to have been broken with the change to support net_device_ops. This is a regression from 2.6.28 to 2.6.29. I'm not 100% sure whether ndo_set_multicast_list can be NULL after this or not. If ndo_set_rx_mode is set everything _should_ be using it. Signed-off-by: Chris Leech Acked-by: Peter P Waskiewicz Jr Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index d2f4d5f508b..5d364a96e35 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3973,6 +3973,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, .ndo_get_stats = ixgbe_get_stats, + .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_set_multicast_list = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, -- cgit v1.2.3-70-g09d2 From 9616a75505be9b87f9625c4d87d8b07a45ddad4d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 13 Mar 2009 13:48:46 -0700 Subject: emac: Fix clock control for 405EX and 405EXr chips The EMAC variant in the 405EX and 405EXr chips needs the "440EP" type clock control workaround to avoid lockups of the Rx side during reset. Signed-off-by: Benjamin Herrenschmidt Tested-by: Felix Radensky Signed-off-by: David S. Miller --- drivers/net/ibm_newemac/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 87a706694fb..6fd7aa61736 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2594,6 +2594,9 @@ static int __devinit emac_init_config(struct emac_instance *dev) if (of_device_is_compatible(np, "ibm,emac-460ex") || of_device_is_compatible(np, "ibm,emac-460gt")) dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX; + if (of_device_is_compatible(np, "ibm,emac-405ex") || + of_device_is_compatible(np, "ibm,emac-405exr")) + dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; } else if (of_device_is_compatible(np, "ibm,emac4")) { dev->features |= EMAC_FTR_EMAC4; if (of_device_is_compatible(np, "ibm,emac-440gx")) -- cgit v1.2.3-70-g09d2 From 08ec9af1c0622b0858099a8644a33af02dd3019f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 13 Mar 2009 14:22:40 -0700 Subject: xfrm: Fix xfrm_state_find() wrt. wildcard source address.
The change to make xfrm_state objects hash on source address broke the case where such source addresses are wildcarded. Fix this by doing a two phase lookup, first with fully specified source address, next using saddr wildcarded. Reported-by: Nicolas Dichtel Signed-off-by: David S. Miller --- net/xfrm/xfrm_state.c | 90 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index e25ff62ab2a..62a5425cc6a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -748,12 +748,51 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision) schedule_work(&net->xfrm.state_hash_work); } +static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, + struct flowi *fl, unsigned short family, + xfrm_address_t *daddr, xfrm_address_t *saddr, + struct xfrm_state **best, int *acq_in_progress, + int *error) +{ + /* Resolution logic: + * 1. There is a valid state with matching selector. Done. + * 2. Valid state with inappropriate selector. Skip. + * + * Entering area of "sysdeps". + * + * 3. If state is not valid, selector is temporary, it selects + * only session which triggered previous resolution. Key + * manager will do something to install a state with proper + * selector. + */ + if (x->km.state == XFRM_STATE_VALID) { + if ((x->sel.family && + !xfrm_selector_match(&x->sel, fl, x->sel.family)) || + !security_xfrm_state_pol_flow_match(x, pol, fl)) + return; + + if (!*best || + (*best)->km.dying > x->km.dying || + ((*best)->km.dying == x->km.dying && + (*best)->curlft.add_time < x->curlft.add_time)) + *best = x; + } else if (x->km.state == XFRM_STATE_ACQ) { + *acq_in_progress = 1; + } else if (x->km.state == XFRM_STATE_ERROR || + x->km.state == XFRM_STATE_EXPIRED) { + if (xfrm_selector_match(&x->sel, fl, x->sel.family) && + security_xfrm_state_pol_flow_match(x, pol, fl)) + *error = -ESRCH; + } +} + struct xfrm_state * xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, unsigned short family) { + static xfrm_address_t saddr_wildcard = { }; struct net *net = xp_net(pol); unsigned int h; struct hlist_node *entry; @@ -773,40 +812,27 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, xfrm_state_addr_check(x, daddr, saddr, family) && tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && - (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) { - /* Resolution logic: - 1. There is a valid state with matching selector. - Done. - 2. Valid state with inappropriate selector. Skip. - - Entering area of "sysdeps". - - 3. If state is not valid, selector is temporary, - it selects only session which triggered - previous resolution. Key manager will do - something to install a state with proper - selector. 
- */ - if (x->km.state == XFRM_STATE_VALID) { - if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) || - !security_xfrm_state_pol_flow_match(x, pol, fl)) - continue; - if (!best || - best->km.dying > x->km.dying || - (best->km.dying == x->km.dying && - best->curlft.add_time < x->curlft.add_time)) - best = x; - } else if (x->km.state == XFRM_STATE_ACQ) { - acquire_in_progress = 1; - } else if (x->km.state == XFRM_STATE_ERROR || - x->km.state == XFRM_STATE_EXPIRED) { - if (xfrm_selector_match(&x->sel, fl, x->sel.family) && - security_xfrm_state_pol_flow_match(x, pol, fl)) - error = -ESRCH; - } - } + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) + xfrm_state_look_at(pol, x, fl, family, daddr, saddr, + &best, &acquire_in_progress, &error); + } + if (best) + goto found; + + h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); + hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { + if (x->props.family == family && + x->props.reqid == tmpl->reqid && + !(x->props.flags & XFRM_STATE_WILDRECV) && + xfrm_state_addr_check(x, daddr, saddr, family) && + tmpl->mode == x->props.mode && + tmpl->id.proto == x->id.proto && + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) + xfrm_state_look_at(pol, x, fl, family, daddr, saddr, + &best, &acquire_in_progress, &error); } +found: x = best; if (!x && !error && !acquire_in_progress) { if (tmpl->id.spi && -- cgit v1.2.3-70-g09d2 From 5a89392225c6147d10328a64b06b756561e97edf Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Fri, 13 Mar 2009 15:48:02 -0700 Subject: mv643xx_eth: fix unicast address filter corruption on mtu change When mv643xx_eth_open() is called to up an interface, port_start() will first re-program the unicast address filter, and then re-initialise the PORT_CONFIG register, but that will disable unicast promiscuous mode if it was enabled by the unicast address filter setup. This isn't a problem on ifconfig up, as ->set_rx_mode() will be called shortly afterwards which will program the filters again, but it does trigger when changing the MTU, which calls mv643xx_eth_stop() and then mv643xx_eth_open() by hand to repopulate the receive rings with skbuffs of the new size. Swap the initialisation of the PORT_CONFIG register and the call to the unicast filter setup function to fix this. Signed-off-by: Lennert Buytenhek Signed-off-by: David S. Miller --- drivers/net/mv643xx_eth.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 13f11f402a9..b0bc3bc18e9 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -2029,11 +2029,6 @@ static void port_start(struct mv643xx_eth_private *mp) txq_set_fixed_prio_mode(txq); } - /* - * Add configured unicast address to address filter table. - */ - mv643xx_eth_program_unicast_filter(mp->dev); - /* * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast * frames to RX queue #0, and include the pseudo-header when @@ -2046,6 +2041,11 @@ static void port_start(struct mv643xx_eth_private *mp) */ wrlp(mp, PORT_CONFIG_EXT, 0x00000000); + /* + * Add configured unicast addresses to address filter table. + */ + mv643xx_eth_program_unicast_filter(mp->dev); + /* * Enable the receive queues. */ -- cgit v1.2.3-70-g09d2 From de9307c68624b03d2922a02a661ce31e20f078cc Mon Sep 17 00:00:00 2001 From: Dhananjay Phadke Date: Fri, 6 Mar 2009 14:52:12 +0000 Subject: netxen: remove old flash check. Remove flash size check which made sense only for ancient boards with 1MB flash.
The check is based on values read from specific locations and fails with firmware size changes. This prevents the driver from getting the right MAC addresses. Signed-off-by: Dhananjay Phadke Signed-off-by: David S. Miller --- drivers/net/netxen/netxen_nic.h | 1 - drivers/net/netxen/netxen_nic_hw.c | 22 ---------------------- drivers/net/netxen/netxen_nic_main.c | 3 --- 3 files changed, 26 deletions(-) diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index f4dd9acb687..1ff066b2281 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -1595,7 +1595,6 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter) } -int netxen_is_flash_supported(struct netxen_adapter *adapter); int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); extern void netxen_change_ringparam(struct netxen_adapter *adapter); diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 821cff68b3f..7fea7708810 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -706,28 +706,6 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) return rc; } -int netxen_is_flash_supported(struct netxen_adapter *adapter) -{ - const int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 }; - int addr, val01, val02, i, j; - - /* if the flash size less than 4Mb, make huge war cry and die */ - for (j = 1; j < 4; j++) { - addr = j * NETXEN_NIC_WINDOW_MARGIN; - for (i = 0; i < ARRAY_SIZE(locs); i++) { - if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0 - && netxen_rom_fast_read(adapter, (addr + locs[i]), - &val02) == 0) { - if (val01 == val02) - return -1; - } else - return -1; - } - } - - return 0; -} - static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, int size, __le32 * buf) { diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 13087782ac4..c172b6e24a9 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -405,9 +405,6 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - if (netxen_is_flash_supported(adapter) != 0) - return -EIO; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) return -EIO; -- cgit v1.2.3-70-g09d2 From 682337fe062e939578d933c74157ae9a36baa4ce Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 14 Mar 2009 22:26:40 -0700 Subject: igb: remove ASPM L0s workaround The L0s workaround should be moved into a pci quirk and so it is not necessary in the driver. This update removes the L0s workaround from the igb driver. This was the second half of the PCI quirk patch that Matthew Wilcox did not pick up when he picked up the quirk patch. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: David S.
Miller --- drivers/net/igb/igb_main.c | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index a50db5398fa..9dd13ad12ce 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1023,11 +1023,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, struct net_device *netdev; struct igb_adapter *adapter; struct e1000_hw *hw; - struct pci_dev *us_dev; const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; unsigned long mmio_start, mmio_len; - int i, err, pci_using_dac, pos; - u16 eeprom_data = 0, state = 0; + int i, err, pci_using_dac; + u16 eeprom_data = 0; u16 eeprom_apme_mask = IGB_EEPROM_APME; u32 part_num; int bars, need_ioport; @@ -1062,27 +1061,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, } } - /* 82575 requires that the pci-e link partner disable the L0s state */ - switch (pdev->device) { - case E1000_DEV_ID_82575EB_COPPER: - case E1000_DEV_ID_82575EB_FIBER_SERDES: - case E1000_DEV_ID_82575GB_QUAD_COPPER: - us_dev = pdev->bus->self; - pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP); - if (pos) { - pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL, - &state); - state &= ~PCIE_LINK_STATE_L0S; - pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL, - state); - dev_info(&pdev->dev, - "Disabling ASPM L0s upstream switch port %s\n", - pci_name(us_dev)); - } - default: - break; - } - err = pci_request_selected_regions(pdev, bars, igb_driver_name); if (err) goto err_pci_reg; -- cgit v1.2.3-70-g09d2 From 97d477a914b146e7e6722ded21afa79886ae8ccd Mon Sep 17 00:00:00 2001 From: françois romieu Date: Sun, 15 Mar 2009 01:09:54 +0000 Subject: r8169: use hardware auto-padding. It shortens the code and fixes the current pci_unmap leak with padded skb reported by Dave Jones. Signed-off-by: Francois Romieu Signed-off-by: David S. Miller --- drivers/net/r8169.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index b3473401c83..352da2a43c9 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3363,13 +3363,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) opts1 |= FirstFrag; } else { len = skb->len; - - if (unlikely(len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) - goto err_update_stats; - len = ETH_ZLEN; - } - opts1 |= FirstFrag | LastFrag; tp->tx_skb[entry].skb = skb; } @@ -3407,7 +3400,6 @@ out: err_stop: netif_stop_queue(dev); ret = NETDEV_TX_BUSY; -err_update_stats: dev->stats.tx_dropped++; goto out; } -- cgit v1.2.3-70-g09d2 From ea8dbdd17099a9a5864ebd4c87e01e657b19c7ab Mon Sep 17 00:00:00 2001 From: françois romieu Date: Sun, 15 Mar 2009 01:10:50 +0000 Subject: r8169: revert "r8169: read MAC address from EEPROM on init (2nd attempt)" It fails on the following systems: - RTL8169sc/8110sc (XID 18000000) reported by Tim Durack (x86) - RTL8169sb/8110sb (XID 10000000) reported by Mikael Pettersson (ARM) The patch appeared to work on x86 for the following systems: RTL8169sb/8110sb 10000000 PCI (EXT) RTL8110s 04000000 PCI (EXT) RTL8102e 24a00000 PCI-E (LOM) RTL8168c/8111c 3c2000c0 PCI-E (LOM) RTL8168b/8111b 38000000 PCI-E (LOM) RTL8168b/8111b 38000000 PCI-E (EXT) The patch exposes two problems: 1) while not completely wrong, mac addresses are not read correctly from the EEPROM 2) the MAC address registers are not correctly set Signed-off-by: Francois Romieu Tested-by: Mikael Pettersson Signed-off-by: David S. 
Miller --- drivers/net/r8169.c | 114 +--------------------------------------------------- 1 file changed, 2 insertions(+), 112 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 352da2a43c9..43fedb9eced 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -81,9 +81,9 @@ static const int multicast_filter_limit = 32; #define RTL8169_TX_TIMEOUT (6*HZ) #define RTL8169_PHY_TIMEOUT (10*HZ) -#define RTL_EEPROM_SIG 0x8129 +#define RTL_EEPROM_SIG cpu_to_le32(0x8129) +#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) #define RTL_EEPROM_SIG_ADDR 0x0000 -#define RTL_EEPROM_MAC_ADDR 0x0007 /* write/read MMIO register */ #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) @@ -293,11 +293,6 @@ enum rtl_register_content { /* Cfg9346Bits */ Cfg9346_Lock = 0x00, Cfg9346_Unlock = 0xc0, - Cfg9346_Program = 0x80, /* Programming mode */ - Cfg9346_EECS = 0x08, /* Chip select */ - Cfg9346_EESK = 0x04, /* Serial data clock */ - Cfg9346_EEDI = 0x02, /* Data input */ - Cfg9346_EEDO = 0x01, /* Data output */ /* rx_mode_bits */ AcceptErr = 0x20, @@ -310,7 +305,6 @@ enum rtl_register_content { /* RxConfigBits */ RxCfgFIFOShift = 13, RxCfgDMAShift = 8, - RxCfg9356SEL = 6, /* EEPROM type: 0 = 9346, 1 = 9356 */ /* TxConfigBits */ TxInterFrameGapShift = 24, @@ -1969,108 +1963,6 @@ static const struct net_device_ops rtl8169_netdev_ops = { }; -/* Delay between EEPROM clock transitions. Force out buffered PCI writes. */ -#define RTL_EEPROM_DELAY() RTL_R8(Cfg9346) -#define RTL_EEPROM_READ_CMD 6 - -/* read 16bit word stored in EEPROM. EEPROM is addressed by words. */ -static u16 rtl_eeprom_read(void __iomem *ioaddr, int addr) -{ - u16 result = 0; - int cmd, cmd_len, i; - - /* check for EEPROM address size (in bits) */ - if (RTL_R32(RxConfig) & (1 << RxCfg9356SEL)) { - /* EEPROM is 93C56 */ - cmd_len = 3 + 8; /* 3 bits for command id and 8 for address */ - cmd = (RTL_EEPROM_READ_CMD << 8) | (addr & 0xff); - } else { - /* EEPROM is 93C46 */ - cmd_len = 3 + 6; /* 3 bits for command id and 6 for address */ - cmd = (RTL_EEPROM_READ_CMD << 6) | (addr & 0x3f); - } - - /* enter programming mode */ - RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS); - RTL_EEPROM_DELAY(); - - /* write command and requested address */ - while (cmd_len--) { - u8 x = Cfg9346_Program | Cfg9346_EECS; - - x |= (cmd & (1 << cmd_len)) ? Cfg9346_EEDI : 0; - - /* write a bit */ - RTL_W8(Cfg9346, x); - RTL_EEPROM_DELAY(); - - /* raise clock */ - RTL_W8(Cfg9346, x | Cfg9346_EESK); - RTL_EEPROM_DELAY(); - } - - /* lower clock */ - RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS); - RTL_EEPROM_DELAY(); - - /* read back 16bit value */ - for (i = 16; i > 0; i--) { - /* raise clock */ - RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS | Cfg9346_EESK); - RTL_EEPROM_DELAY(); - - result <<= 1; - result |= (RTL_R8(Cfg9346) & Cfg9346_EEDO) ? 
1 : 0; - - /* lower clock */ - RTL_W8(Cfg9346, Cfg9346_Program | Cfg9346_EECS); - RTL_EEPROM_DELAY(); - } - - RTL_W8(Cfg9346, Cfg9346_Program); - /* leave programming mode */ - RTL_W8(Cfg9346, Cfg9346_Lock); - - return result; -} - -static void rtl_init_mac_address(struct rtl8169_private *tp, - void __iomem *ioaddr) -{ - struct pci_dev *pdev = tp->pci_dev; - u16 x; - u8 mac[8]; - - /* read EEPROM signature */ - x = rtl_eeprom_read(ioaddr, RTL_EEPROM_SIG_ADDR); - - if (x != RTL_EEPROM_SIG) { - dev_info(&pdev->dev, "Missing EEPROM signature: %04x\n", x); - return; - } - - /* read MAC address */ - x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR); - mac[0] = x & 0xff; - mac[1] = x >> 8; - x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 1); - mac[2] = x & 0xff; - mac[3] = x >> 8; - x = rtl_eeprom_read(ioaddr, RTL_EEPROM_MAC_ADDR + 2); - mac[4] = x & 0xff; - mac[5] = x >> 8; - - if (netif_msg_probe(tp)) { - DECLARE_MAC_BUF(buf); - - dev_info(&pdev->dev, "MAC address found in EEPROM: %s\n", - print_mac(buf, mac)); - } - - if (is_valid_ether_addr(mac)) - rtl_rar_set(tp, mac); -} - static int __devinit rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2249,8 +2141,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->mmio_addr = ioaddr; - rtl_init_mac_address(tp, ioaddr); - /* Get MAC address */ for (i = 0; i < MAC_ADDR_LEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); -- cgit v1.2.3-70-g09d2
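
A note on the padding-related fixes in this series: the qlge skb_padto() fix, the via-velocity DMA-length fix, and the r8169 auto-padding change all address the same pitfall. Ethernet frames shorter than ETH_ZLEN (60 bytes) must be padded before transmit, and because skb_padto() zeroes the tail without growing skb->len, the padded length, not skb->len, is what must be handed to the DMA mapping and to the matching unmap. The sketch below is editorial, not code from any commit above; my_start_xmit() and the elided mapping step are hypothetical placeholders written against the 2.6.29-era netdev interfaces these drivers use.

#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int pktlen;

	/* Pad undersized frames first. On failure skb_padto() has
	 * already freed the skb, so report the frame as handled. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* skb_padto() does not update skb->len, so compute the on-wire
	 * length explicitly and use it consistently: for the descriptor,
	 * for pci_map_single() and for the later pci_unmap_single().
	 * Mapping skb->len but unmapping a different length is exactly
	 * the mismatch dma-debug flagged in via-velocity. */
	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);

	/* ... map pktlen bytes, fill a tx descriptor, kick the hw ... */

	return NETDEV_TX_OK;
}

r8169 sidesteps the bookkeeping entirely by letting the chip pad short frames in hardware, which is why its fix can simply delete the skb_padto() path along with the error label it required.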