author     David Woodhouse <David.Woodhouse@intel.com>    2012-11-21 10:38:13 +0000
committer  David Woodhouse <David.Woodhouse@intel.com>    2012-11-21 10:38:13 +0000
commit     851462444d421c223965b12b836bef63da61b57f
tree       495baa14e638817941496c36e1443aed7dae0ea0  /drivers/net/ethernet
parent     5a6ea4af0907f995dc06df21a9c9ef764c7cd3bc
parent     6924d99fcdf1a688538a3cdebd1f135c22eec191
Merge branch 'for-3.7' of git://git.infradead.org/users/dedekind/l2-mtd
Conflicts:
drivers/mtd/nand/nand_base.c
Diffstat (limited to 'drivers/net/ethernet')
50 files changed, 382 insertions, 363 deletions
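A pattern that repeats through the probe-path fixes below (amd8111e, au1000_eth, dmfe, xtsonic, skge, sky2, sh_eth, qlcnic) is making sure the function's return variable holds a real error code before jumping to the unwind label, rather than falling through with the stale zero left over from an earlier success. The following is a minimal, self-contained sketch of that pattern, not the drivers' actual code; the names `demo_probe`, `alloc_ring` and `register_dev` are hypothetical stand-ins for the driver-specific calls.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for driver-specific allocation/registration calls. */
static void *alloc_ring(void)         { return malloc(64); }
static int   register_dev(void *ring) { return ring ? 0 : -EINVAL; }

static int demo_probe(void)
{
	void *ring;
	int err;

	ring = alloc_ring();
	if (!ring) {
		/* Without this assignment the function would fall through and
		 * return 0 (success) even though probing failed - the bug the
		 * patches below fix by assigning -ENOMEM/-ENODEV before goto. */
		err = -ENOMEM;
		goto err_out;
	}

	err = register_dev(ring);	/* propagate the callee's error code */
	if (err)
		goto err_free;

	return 0;

err_free:
	free(ring);
err_out:
	return err;
}

int main(void)
{
	printf("demo_probe() = %d\n", demo_probe());
	return 0;
}
```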
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 64d0d9c1afa..3491d4312fc 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ printk(KERN_ERR "amd8111e: No Power Management capability, " "exiting.\n"); + err = -ENODEV; goto err_free_reg; } @@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR "amd8111e: DMA not supported," "exiting.\n"); + err = -ENODEV; goto err_free_reg; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 397596b078d..f195acfa2df 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev) snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - if (aup->mii_bus->irq == NULL) + if (aup->mii_bus->irq == NULL) { + err = -ENOMEM; goto err_out; + } for (i = 0; i < PHY_MAX_ADDR; ++i) aup->mii_bus->irq[i] = PHY_POLL; @@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev) goto err_mdiobus_reg; } - if (au1000_mii_probe(dev) != 0) + err = au1000_mii_probe(dev); + if (err != 0) goto err_out; pDBfree = NULL; @@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) } aup->pDBfree = pDBfree; + err = -ENODEV; for (i = 0; i < NUM_RX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) @@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev) aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; aup->rx_db_inuse[i] = pDB; } + + err = -ENODEV; for (i = 0; i < NUM_TX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 24220992413..4833b6a9031 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_shinfo(skb)->nr_frags + BDS_PER_TX_PKT + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; - netif_tx_stop_queue(txq); - BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + /* Handle special storage cases separately */ + if (txdata->tx_ring_size != 0) { + BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + netif_tx_stop_queue(txq); + } + return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index e2e45ee5df3..6dd0dd076cc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -137,7 +137,16 @@ #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD - +#define LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) #define SFP_EEPROM_CON_TYPE_ADDR 0x2 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 @@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) DEFAULT_PHY_DEV_ADDR); } +static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Set correct devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, + phy->def_md_devad); + break; + } +} + static void bnx2x_xgxs_deassert(struct link_params *params) { struct bnx2x *bp = params->bp; @@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params) REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); + bnx2x_xgxs_specific_func(¶ms->phy[INT_PHY], params, + PHY_INIT); } static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, @@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 val16 = 0, lane, i; + u16 lane, i, cl72_ctrl, an_adv = 0; + u16 ucode_ver; struct bnx2x *bp = params->bp; static struct bnx2x_reg_set reg_set[] = { {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555}, {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, @@ -3565,12 +3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0xf8ff; + cl72_ctrl |= 0x3800; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); + /* Check adding advertisement for 1G KX */ if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { u32 addr = 
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; - val16 |= (1<<5); + an_adv |= (1<<5); /* Enable CL37 1G Parallel Detect */ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); @@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || (vars->line_speed == SPEED_10000)) { /* Check adding advertisement for 10G KR */ - val16 |= (1<<7); + an_adv |= (1<<7); /* Enable 10G Parallel Detect */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 1); - + bnx2x_set_aer_mmd(params, phy); DP(NETIF_MSG_LINK, "Advertize 10G\n"); } @@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertised speeds */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); /* Advertised and set FEC (Forward Error Correction) */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); - if (val16 < 0xd108) { - DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); + MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); + if (ucode_ver < 0xd108) { + DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n", + ucode_ver); vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; } bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, @@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 i; + u16 val16, i, lane; static struct bnx2x_reg_set reg_set[] = { /* Disable Autoneg */ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00}, {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, - /* Disable CL36 PCS Tx */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0}, - /* Double Wide Single Data Rate @ pll rate */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF}, /* Leave cl72 training enable, needed for KR */ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, @@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - /* Leave CL72 enabled */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - 0x3800); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Global registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); /* Set speed via 
PMA/PMD register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); @@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u16 val16; + u16 val16, lane; bnx2x_sfp_e3_set_transmitter(params, phy, 0); bnx2x_set_mdio_clk(bp, params->chip_id, params->port); bnx2x_set_aer_mmd(params, phy); @@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, MDIO_WC_REG_XGXSBLK1_LANECTRL2, val16 & 0xff00); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); + } static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, @@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params, vars->mac_type = MAC_TYPE_NONE; /* Update shared memory */ - vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | - LINK_STATUS_LINK_UP | - LINK_STATUS_PHYSICAL_LINK_FLAG | - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | - LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | - LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | - LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | - LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE); + vars->link_status &= ~LINK_UPDATE_MASK; vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status); @@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; u8 active_external_phy = INT_PHY; vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_UPDATE_MASK; for (phy_index = INT_PHY; phy_index < params->num_phys; phy_index++) { phy_vars[phy_index].flow_ctrl = 0; @@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params, static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, u16 addr, u8 byte_cnt, - u8 *o_buf) + u8 *o_buf, u8 is_init) { int rc = 0; u8 i, j = 0, cnt = 0; @@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* 4 byte aligned address */ addr32 = addr & (~0x3); do { - if (cnt == I2C_WA_PWR_ITER) { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { bnx2x_warpcore_power_module(params, phy, 0); /* Note that 100us are not enough here */ - usleep_range(1000,1000); + usleep_range(1000, 2000); bnx2x_warpcore_power_module(params, phy, 1); } rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, @@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf, 0); break; } return rc; @@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, { u8 val; + int rc; struct bnx2x *bp = params->bp; 
u16 timeout; /* Initialization time after hot-plug may take up to 300ms for @@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, */ for (timeout = 0; timeout < 60; timeout++) { - if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) - == 0) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, + params, 1, + 1, &val, 1); + else + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, + &val); + if (rc == 0) { DP(NETIF_MSG_LINK, "SFP+ module initialization took %d ms\n", timeout * 5); @@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, } usleep_range(5000, 10000); } - return -EINVAL; + rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val); + return rc; } static void bnx2x_8727_power_module(struct bnx2x *bp, @@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func }; static struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, @@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_BASE_T; break; case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); phy->media_type = ETH_PHY_XFP_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_SFI: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d5648fc666b..bd1fd3d87c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6794,8 +6794,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, BLOCK_BRB1, init_phase); if (IS_MF(bp)) low = ((bp->flags & ONE_PORT_FLAG) ? 
160 : 246); @@ -11902,7 +11903,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, /* disable FCOE L2 queue for E1x */ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; - + /* disable FCOE for 57840 device, until FW supports it */ + switch (ent->driver_data) { + case BCM57840_O: + case BCM57840_4_10: + case BCM57840_2_20: + case BCM57840_MFO: + case BCM57840_MF: + bp->flags |= NO_FCOE_FLAG; + } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 71971a161bd..614981c0226 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp, /* Check if this request is ok */ rc = o->validate(bp, o->owner, elem); if (rc) { - BNX2X_ERR("Preamble failed: %d\n", rc); + DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc); goto free_and_exit; } } diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 2b4b4f529ab..16814b34d4b 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -375,7 +375,6 @@ struct xgmac_priv { unsigned int tx_tail; void __iomem *base; - struct sk_buff_head rx_recycle; unsigned int dma_buf_sz; dma_addr_t dma_rx_phy; dma_addr_t dma_tx_phy; @@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) p = priv->dma_rx + entry; if (priv->rx_skbuff[entry] == NULL) { - skb = __skb_dequeue(&priv->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); + skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); if (unlikely(skb == NULL)) break; @@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) desc_get_buf_len(p), DMA_TO_DEVICE); } - /* - * If there's room in the queue (limit it to size) - * we add this skb back into the pool, - * if it's the right size. 
- */ - if ((skb_queue_len(&priv->rx_recycle) < - DMA_RX_RING_SZ) && - skb_recycle_check(skb, priv->dma_buf_sz)) - __skb_queue_head(&priv->rx_recycle, skb); - else - dev_kfree_skb(skb); + dev_kfree_skb(skb); } if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > @@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev) dev->dev_addr); } - skb_queue_head_init(&priv->rx_recycle); memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); /* Initialize the XGMAC and descriptors */ @@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev) napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); - skb_queue_purge(&priv->rx_recycle); /* Disable the MAC core */ xgmac_mac_disable(priv->base); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 31752b24434..378988b5709 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -251,6 +251,8 @@ struct adapter_params { unsigned char rev; /* chip revision */ unsigned char offload; + unsigned char bypass; + unsigned int ofldq_wr_cred; }; @@ -642,6 +644,23 @@ extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) +static inline int is_bypass(struct adapter *adap) +{ + return adap->params.bypass; +} + +static inline int is_bypass_device(int device) +{ + /* this should be set based upon device capabilities */ + switch (device) { + case 0x440b: + case 0x440c: + return 1; + default: + return 0; + } +} + static inline unsigned int core_ticks_per_usec(const struct adapter *adap) { return adap->params.vpd.cclk / 1000; @@ -696,6 +715,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable); int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); unsigned int t4_flash_cfg_addr(struct adapter *adapter); +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_check_fw_version(struct adapter *adapter); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6b9f6bb2f7e..0df1284df49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ module_param(dbfifo_int_thresh, int, 0644); MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); -int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */ +/* + * usecs to sleep while draining the dbfifo + */ +static int dbfifo_drain_delay = 1000; module_param(dbfifo_drain_delay, int, 0644); MODULE_PARM_DESC(dbfifo_drain_delay, "usecs to sleep while draining the dbfifo"); @@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap) static int request_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, adap->msix_info[1].desc, &s->fw_evtq); @@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap) return err; for_each_ethrxq(s, ethqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - 
adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ethrxq[ethqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_ofldrxq(s, ofldqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ofldrxq[ofldqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_rdmarxq(s, rdmaqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->rdmarxq[rdmaqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } return 0; unwind: while (--rdmaqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->rdmarxq[rdmaqidx].rspq); while (--ofldqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->ofldrxq[ofldqidx].rspq); while (--ethqidx >= 0) - free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[--msi_index].vec, + &s->ethrxq[ethqidx].rspq); free_irq(adap->msix_info[1].vec, &s->fw_evtq); return err; } static void free_msix_queue_irqs(struct adapter *adap) { - int i, msi = 2; + int i, msi_index = 2; struct sge *s = &adap->sge; free_irq(adap->msix_info[1].vec, &s->fw_evtq); for_each_ethrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); for_each_rdmarxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); } /** @@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); if (!ret) { - indices = be64_to_cpu(indices); - *cidx = (indices >> 25) & 0xffff; - *pidx = (indices >> 9) & 0xffff; + *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; + *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; } return ret; } @@ -3410,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) finicsum, cfcsum); /* - * If we're a pure NIC driver then disable all offloading facilities. - * This will allow the firmware to optimize aspects of the hardware - * configuration which will result in improved performance. - */ - caps_cmd.ofldcaps = 0; - caps_cmd.iscsicaps = 0; - caps_cmd.rdmacaps = 0; - caps_cmd.fcoecaps = 0; - - /* * And now tell the firmware to use the configuration we just loaded. */ caps_cmd.op_to_write = @@ -3507,18 +3503,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; -#ifndef CONFIG_CHELSIO_T4_OFFLOAD - /* - * If we're a pure NIC driver then disable all offloading facilities. - * This will allow the firmware to optimize aspects of the hardware - * configuration which will result in improved performance. 
- */ - caps_cmd.ofldcaps = 0; - caps_cmd.iscsicaps = 0; - caps_cmd.rdmacaps = 0; - caps_cmd.fcoecaps = 0; -#endif - if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { if (!vf_acls) caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); @@ -3634,10 +3618,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset) * field selections will fit in the 36-bit budget. */ if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { - int i, bits = 0; + int j, bits = 0; - for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++) - switch (tp_vlan_pri_map & (1 << i)) { + for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) + switch (tp_vlan_pri_map & (1 << j)) { case 0: /* compressed filter field not enabled */ break; @@ -3739,6 +3723,7 @@ static int adap_init0(struct adapter *adap) u32 v, port_vec; enum dev_state state; u32 params[7], val[7]; + struct fw_caps_config_cmd caps_cmd; int reset = 1, j; /* @@ -3892,6 +3877,9 @@ static int adap_init0(struct adapter *adap) goto bye; } + if (is_bypass_device(adap->pdev->device)) + adap->params.bypass = 1; + /* * Grab some of our basic fundamental operating parameters. */ @@ -3934,13 +3922,12 @@ static int adap_init0(struct adapter *adap) adap->tids.aftid_end = val[1]; } -#ifdef CONFIG_CHELSIO_T4_OFFLOAD /* * Get device capabilities so we can determine what resources we need * to manage. */ memset(&caps_cmd, 0, sizeof(caps_cmd)); - caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ); caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), @@ -3985,15 +3972,6 @@ static int adap_init0(struct adapter *adap) adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; - params[0] = FW_PARAM_PFVF(ETHOFLD_START); - params[1] = FW_PARAM_PFVF(ETHOFLD_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, - params, val); - if ((val[0] != val[1]) && (ret >= 0)) { - adap->tids.uotid_base = val[0]; - adap->tids.nuotids = val[1] - val[0] + 1; - } - adap->params.offload = 1; } if (caps_cmd.rdmacaps) { @@ -4042,7 +4020,6 @@ static int adap_init0(struct adapter *adap) } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV -#endif /* CONFIG_CHELSIO_T4_OFFLOAD */ /* * These are finalized by FW initialization, load their values now. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 1b899fea1a9..39bec73ff87 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -102,6 +102,9 @@ struct tid_info { unsigned int ftid_base; unsigned int aftid_base; unsigned int aftid_end; + /* Server filter region */ + unsigned int sftid_base; + unsigned int nsftids; spinlock_t atid_lock ____cacheline_aligned_in_smp; union aopen_entry *afree; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 137a24438d9..32eec15fe4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { if (dir) - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i)); + *data++ = (__force __be32) t4_read_reg(adap, + (MEMWIN0_BASE + i)); else - t4_write_reg(adap, (MEMWIN0_BASE + i), *data++); + t4_write_reg(adap, (MEMWIN0_BASE + i), + (__force u32) *data++); } return 0; @@ -417,7 +419,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, if ((addr & 0x3) || (len & 0x3)) return -EINVAL; - data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32)); + data = vmalloc(MEMWIN0_APERTURE); if (!data) return -ENOMEM; @@ -744,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr, if (ret) return ret; if (byte_oriented) - *data = htonl(*data); + *data = (__force __u32) (htonl(*data)); } return 0; } @@ -992,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; - const u32 *p = (const u32 *)fw_data; + const __be32 *p = (const __be32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_img_start = adap->params.sf_fw_start; @@ -2315,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); for (i = 0; i < len; i += 4) - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); + *data++ = (__force __be32) t4_read_reg(adap, + (MEMWIN0_BASE + off + i)); return 0; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 4d6fe604fa6..d23755ea9bc 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, /* Allocate Tx/Rx descriptor memory */ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); - if (!db->desc_pool_ptr) + if (!db->desc_pool_ptr) { + err = -ENOMEM; goto err_out_res; + } db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); - if (!db->buf_pool_ptr) + if (!db->buf_pool_ptr) { + err = -ENOMEM; goto err_out_free_desc; + } db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; db->first_tx_desc_dma = db->desc_pool_dma_ptr; @@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, db->chip_id = ent->driver_data; /* IO type range. 
*/ db->ioaddr = pci_iomap(pdev, 0, 0); - if (!db->ioaddr) + if (!db->ioaddr) { + err = -ENOMEM; goto err_out_free_buf; + } db->chip_revision = pdev->revision; db->wol_mode = 0; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eb3f2cb3b93..d1b6cc58763 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter) ue_hi = (ue_hi & ~ue_hi_mask); } - if (ue_lo || ue_hi || - sliport_status & SLIPORT_STATUS_ERR_MASK) { + /* On certain platforms BE hardware can indicate spurious UEs. + * Allow the h/w to stop working completely in case of a real UE. + * Hence not setting the hw_error for UE detection. + */ + if (sliport_status & SLIPORT_STATUS_ERR_MASK) { adapter->hw_error = true; dev_err(&adapter->pdev->dev, "Error detected in the card\n"); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a1b52ec3b93..1d03dcdd5e5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv) sizeof(struct rxbd8) * priv->total_rx_ring_size, priv->tx_queue[0]->tx_bd_base, priv->tx_queue[0]->tx_bd_dma_base); - skb_queue_purge(&priv->rx_recycle); } void gfar_start(struct net_device *dev) @@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev) enable_napi(priv); - skb_queue_head_init(&priv->rx_recycle); - /* Initialize a bunch of registers */ init_registers(dev); @@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bytes_sent += skb->len; - /* If there's room in the queue (limit it to rx_buffer_size) - * we add this skb back into the pool, if it's the right size - */ - if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && - skb_recycle_check(skb, priv->rx_buffer_size + - RXBUF_ALIGNMENT)) { - gfar_align_skb(skb); - skb_queue_head(&priv->rx_recycle, skb); - } else - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb); tx_queue->tx_skbuff[skb_dirtytx] = NULL; @@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, static struct sk_buff *gfar_alloc_skb(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); if (!skb) @@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev) struct sk_buff *gfar_new_skb(struct net_device *dev) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb = NULL; - - skb = skb_dequeue(&priv->rx_recycle); - if (!skb) - skb = gfar_alloc_skb(dev); - - return skb; + return gfar_alloc_skb(dev); } static inline void count_errors(unsigned short status, struct net_device *dev) @@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) if (unlikely(!newskb)) newskb = skb; else if (skb) - skb_queue_head(&priv->rx_recycle, skb); + dev_kfree_skb(skb); } else { /* Increment the number of packets */ rx_queue->stats.rx_packets++; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 4141ef2ddaf..22eabc13ca9 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1080,8 +1080,6 @@ struct gfar_private { u32 cur_filer_idx; - struct sk_buff_head rx_recycle; - 
/* RX queue filer rule set*/ struct ethtool_rx_list rx_list; struct mutex rx_queue_access; diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index b9db0e04056..2e5daee0438 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) pr_err("no resource\n"); goto no_resource; } - if (request_resource(&ioport_resource, etsects->rsrc)) { + if (request_resource(&iomem_resource, etsects->rsrc)) { pr_err("resource busy\n"); goto no_resource; } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 16428843922..0a70bb55d1b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh) static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 __iomem *bd) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; - skb = __skb_dequeue(&ugeth->rx_recycle); + skb = netdev_alloc_skb(ugeth->ndev, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); if (!skb) - skb = netdev_alloc_skb(ugeth->ndev, - ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT); - if (skb == NULL) return NULL; /* We need the data buffer to be aligned properly. We will reserve @@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) iounmap(ugeth->ug_regs); ugeth->ug_regs = NULL; } - - skb_queue_purge(&ugeth->rx_recycle); } static void ucc_geth_set_multi(struct net_device *dev) @@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) return -ENOMEM; } - skb_queue_head_init(&ugeth->rx_recycle); - return 0; } @@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit if (netif_msg_rx_err(ugeth)) ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", __func__, __LINE__, (u32) skb); - if (skb) { - skb->data = skb->head + NET_SKB_PAD; - skb->len = 0; - skb_reset_tail_pointer(skb); - __skb_queue_head(&ugeth->rx_recycle, skb); - } + dev_kfree_skb(skb); ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; dev->stats.rx_dropped++; @@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) dev->stats.tx_packets++; - if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && - skb_recycle_check(skb, - ugeth->ug_info->uf_info.max_rx_buf_length + - UCC_GETH_RX_DATA_BUF_ALIGNMENT)) - __skb_queue_head(&ugeth->rx_recycle, skb); - else - dev_kfree_skb(skb); + dev_kfree_skb(skb); ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; ugeth->skb_dirtytx[txQ] = diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index f71b3e7b12d..75f337163ce 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -1214,8 +1214,6 @@ struct ucc_geth_private { /* index of the first skb which hasn't been transmitted yet. 
*/ u16 skb_dirtytx[NUM_TX_QUEUES]; - struct sk_buff_head rx_recycle; - struct ugeth_mii_info *mii_info; struct phy_device *phydev; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cb3356c9af8..04668b47a1d 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -175,13 +175,13 @@ struct e1000_info; /* * in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when - * WTHRESH=4, and since we want 64 bytes at a time written back, set - * it to 5 + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 */ #define E1000_TXDCTL_DMA_BURST_ENABLE \ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ E1000_TXDCTL_COUNT_DESC | \ - (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 16) | /* wthresh must be +1 more than desired */\ (1 << 8) | /* hthresh */ \ 0x1f) /* pthresh */ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index ed5b40985ed..d37bfd96c98 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -412,6 +412,8 @@ enum e1e_registers { #define E1000_DEV_ID_PCH2_LV_V 0x1503 #define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fb659dd8db0..f444eb0b76d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) * set up some performance related parameters to encourage the * hardware to use the bus more efficiently in bursts, depends * on the tx_int_delay to be enabled, - * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls * hthresh = 1 ==> prefetch when one or more available * pthresh = 0x1f ==> prefetch if internal cache 31 or less * BEWARE: this seems to work but should be considered first if @@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5bd26763554..30efc9f0f47 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) #define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ #ifdef IXGBE_FCOE /* Use 3K as the baby jumbo frame size for FCoE */ #define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 56b20d17d0e..116f0e901be 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2673,6 +2673,9 @@ static int ixgbe_get_ts_info(struct net_device *dev, case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 383b4e1cd17..4a9c9c28568 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -175,7 +175,7 @@ struct ixgbevf_q_vector { #define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ #define OTHER_VECTOR 1 #define NON_Q_VECTORS (OTHER_VECTOR) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 0ee9bd4819f..de1ad506665 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1747,6 +1747,7 @@ err_tx_ring_allocation: **/ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { + struct net_device *netdev = adapter->netdev; int err = 0; int vector, v_budget; @@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) ixgbevf_acquire_msix_vectors(adapter, v_budget); + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto out; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + out: return err; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index c911d883c27..f8064df10cc 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev, /* * set up PCI device basics */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + rc = pci_enable_device(pdev); if (rc) { pr_err("Cannot enable PCI device\n"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 087b9e0669f..84c13263c51 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -412,7 +412,6 @@ struct mv643xx_eth_private { u8 work_rx_refill; int skb_size; - struct sk_buff_head rx_recycle; /* * RX state. 
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) struct rx_desc *rx_desc; int size; - skb = __skb_dequeue(&mp->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb(mp->dev, mp->skb_size); + skb = netdev_alloc_skb(mp->dev, mp->skb_size); if (skb == NULL) { mp->oom = 1; @@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) desc->byte_cnt, DMA_TO_DEVICE); } - if (skb != NULL) { - if (skb_queue_len(&mp->rx_recycle) < - mp->rx_ring_size && - skb_recycle_check(skb, mp->skb_size)) - __skb_queue_head(&mp->rx_recycle, skb); - else - dev_kfree_skb(skb); - } + dev_kfree_skb(skb); } __netif_tx_unlock(nq); @@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev) napi_enable(&mp->napi); - skb_queue_head_init(&mp->rx_recycle); - mp->int_mask = INT_EXT; for (i = 0; i < mp->rxq_count; i++) { @@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev) mib_counters_update(mp); del_timer_sync(&mp->mib_counters_timer); - skb_queue_purge(&mp->rx_recycle); - for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5a30bf82309..9b9c2ac5c4c 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do) if (work_done < to_do) { unsigned long flags; - napi_gro_flush(napi); + napi_gro_flush(napi, false); spin_lock_irqsave(&hw->hw_lock, flags); __napi_complete(napi); hw->intr_mask |= napimask[skge->port]; @@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev, skge_board_name(hw), hw->chip_rev); dev = skge_devinit(hw, 0, using_dac); - if (!dev) + if (!dev) { + err = -ENOMEM; goto err_out_led_off; + } /* Some motherboards are broken and has zero in ROM. 
*/ if (!is_valid_ether_addr(dev->dev_addr)) @@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = { DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, {} }; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 2b0748dba8b..78946feab4a 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, if (~reg == 0) { dev_err(&pdev->dev, "PCI configuration read error\n"); + err = -EIO; goto err_out; } @@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev, hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), &hw->st_dma); - if (!hw->st_le) + if (!hw->st_le) { + err = -ENOMEM; goto err_out_reset; + } dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c10e3a6de09..b35094c590b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_bf_free(mdev->dev, &ring->bf); mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); - mlx4_qp_release_range(mdev->dev, ring->qpn, 1); mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); @@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (bounce) tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 51c764901ad..b84a88bc44d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, ctx = &priv->mfunc.master.slave_state[slave]; spin_lock_irqsave(&ctx->lock, flags); - mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n", - __func__, slave, cur_state, event); - switch (cur_state) { case SLAVE_PORT_DOWN: if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) @@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, goto out; } ret = mlx4_get_slave_port_state(dev, slave, port); - mlx4_dbg(dev, "%s: slave: %d, current state: %d new event" - " :%d gen_event: %d\n", - __func__, slave, cur_state, event, *gen_event); out: spin_unlock_irqrestore(&ctx->lock, flags); @@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); } +static void mlx4_unmap_uar(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + if (priv->eq_table.uar_map[i]) { + 
iounmap(priv->eq_table.uar_map[i]); + priv->eq_table.uar_map[i] = NULL; + } +} + static int mlx4_create_eq(struct mlx4_dev *dev, int nent, u8 intr, struct mlx4_eq *eq) { @@ -1207,6 +1213,7 @@ err_out_unmap: mlx4_free_irqs(dev); err_out_bitmap: + mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); err_out_free: @@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); - for (i = 0; i < mlx4_num_eq_uar(dev); ++i) - if (priv->eq_table.uar_map[i]) - iounmap(priv->eq_table.uar_map[i]); - + mlx4_unmap_uar(dev); mlx4_bitmap_cleanup(&priv->eq_table.bitmap); kfree(priv->eq_table.uar_map); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 80df2ab0177..2aa80afd98d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1405,7 +1405,10 @@ unmap_bf: unmap_bf_area(dev); err_close: - mlx4_close_hca(dev); + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else + mlx4_CLOSE_HCA(dev, 0); err_free_icm: if (!mlx4_is_slave(dev)) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 926c911c0ac..b05705f50f0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave, new_index = priv->virt2phys_pkey[slave][port - 1][orig_index]; *(u8 *)(inbox->buf + 35) = new_index; - - mlx4_dbg(dev, "port = %d, orig pkey index = %d, " - "new pkey index = %d\n", port, orig_index, new_index); } static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, @@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) qp_ctx->alt_path.mgid_index = slave & 0x7F; } - - mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", - slave, qp_ctx->pri_path.mgid_index); } static int mpt_mask(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 5b61d12f8b9..dbaaa99a0d4 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, i = register_netdev(dev); if (i) goto err_register_netdev; - - if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) + i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround); + if (i) goto err_create_file; if (netif_msg_drv(np)) { diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index e01c0a07a93..7dfe88398d7 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev) if (lp->descriptors == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for " " descriptors.\n", dev_name(lp->device)); + err = -ENOMEM; goto out; } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 53743f7a2ca..af8b4142088 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) pldat->dma_buff_base_p); free_irq(ndev->irq, ndev); iounmap(pldat->net_base); + mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); clk_disable(pldat->clk); clk_put(pldat->clk); diff --git 
a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 97302419a37..5296cc8d3cb 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -26,6 +26,9 @@ if PCH_GBE config PCH_PTP bool "PCH PTP clock support" default n + depends on EXPERIMENTAL + select PPS + select PTP_1588_CLOCK select PTP_1588_CLOCK_PCH ---help--- Say Y here if you want to use Precision Time Protocol (PTP) in the diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index b2a94d02a52..4c4fe5b1a29 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) } /** - * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context - * @reg: Pointer of register - * @busy: Busy bit - */ -static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) -{ - u32 tmp; - int ret = -1; - /* wait busy */ - tmp = 20; - while ((ioread32(reg) & bit) && --tmp) - udelay(5); - if (!tmp) - pr_err("Error: busy bit is not cleared\n"); - else - ret = 0; - return ret; -} - -/** * pch_gbe_mac_mar_set - Set MAC address register * @hw: Pointer to the HW structure * @addr: Pointer to the MAC address @@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) return; } -static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) +static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw) { - /* Read the MAC addresses. and store to the private data */ - pch_gbe_mac_read_mac_addr(hw); - iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); - pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); - /* Setup the MAC addresses */ - pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); - return; + u32 rctl; + /* Disables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); +} + +static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw) +{ + u32 rctl; + /* Enables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); } /** @@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) { struct pch_gbe_hw *hw = &adapter->hw; - u32 rdba, rdlen, rctl, rxdma; + u32 rdba, rdlen, rxdma; pr_debug("dma adr = 0x%08llx size = 0x%08x\n", (unsigned long long)adapter->rx_ring->dma, @@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) pch_gbe_mac_force_mac_fc(hw); - /* Disables Receive MAC */ - rctl = ioread32(&hw->reg->MAC_RX_EN); - iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); + pch_gbe_disable_mac_rx(hw); /* Disables Receive DMA */ rxdma = ioread32(&hw->reg->DMA_CTRL); @@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) spin_unlock_irqrestore(&adapter->stats_lock, flags); } -static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) +static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw) { - struct pch_gbe_hw *hw = &adapter->hw; u32 rxdma; - u16 value; - int ret; /* Disable Receive DMA */ rxdma = ioread32(&hw->reg->DMA_CTRL); rxdma &= ~PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Wait Rx DMA BUS is IDLE */ - ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); - if (ret) { - /* Disable Bus master */ - 
pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); - value &= ~PCI_COMMAND_MASTER; - pci_write_config_word(adapter->pdev, PCI_COMMAND, value); - /* Stop Receive */ - pch_gbe_mac_reset_rx(hw); - /* Enable Bus master */ - value |= PCI_COMMAND_MASTER; - pci_write_config_word(adapter->pdev, PCI_COMMAND, value); - } else { - /* Stop Receive */ - pch_gbe_mac_reset_rx(hw); - } - /* reprogram multicast address register after reset */ - pch_gbe_set_multi(adapter->netdev); } -static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw) { u32 rxdma; @@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw) rxdma = ioread32(&hw->reg->DMA_CTRL); rxdma |= PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Enables Receive */ - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); - return; } /** @@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), &hw->reg->INT_EN); - pch_gbe_stop_receive(adapter); + pch_gbe_disable_dma_rx(&adapter->hw); int_st |= ioread32(&hw->reg->INT_ST); int_st = int_st & ioread32(&hw->reg->INT_EN); } @@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; - int err; + int err = -EINVAL; /* Ensure we have a valid MAC */ if (!is_valid_ether_addr(adapter->hw.mac.addr)) { pr_err("Error: Invalid MAC address\n"); - return -EINVAL; + goto out; } /* hardware has been reset, we need to reload some things */ @@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) err = pch_gbe_request_irq(adapter); if (err) { - pr_err("Error: can't bring device up\n"); - return err; + pr_err("Error: can't bring device up - irq request failed\n"); + goto out; } err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); if (err) { - pr_err("Error: can't bring device up\n"); - return err; + pr_err("Error: can't bring device up - alloc rx buffers pool failed\n"); + goto freeirq; } pch_gbe_alloc_tx_buffers(adapter, tx_ring); pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); adapter->tx_queue_len = netdev->tx_queue_len; - pch_gbe_start_receive(&adapter->hw); + pch_gbe_enable_dma_rx(&adapter->hw); + pch_gbe_enable_mac_rx(&adapter->hw); mod_timer(&adapter->watchdog_timer, jiffies); @@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) netif_start_queue(adapter->netdev); return 0; + +freeirq: + pch_gbe_free_irq(adapter); +out: + return err; } /** @@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) int work_done = 0; bool poll_end_flag = false; bool cleaned = false; - u32 int_en; pr_debug("budget : %d\n", budget); @@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) if (poll_end_flag) { napi_complete(napi); - if (adapter->rx_stop_flag) { - adapter->rx_stop_flag = false; - pch_gbe_start_receive(&adapter->hw); - } pch_gbe_irq_enable(adapter); - } else - if (adapter->rx_stop_flag) { - adapter->rx_stop_flag = false; - pch_gbe_start_receive(&adapter->hw); - int_en = ioread32(&adapter->hw.reg->INT_EN); - iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), - &adapter->hw.reg->INT_EN); - } + } + + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + pch_gbe_enable_dma_rx(&adapter->hw); + } pr_debug("poll_end_flag : %d work_done : %d 
budget : %d\n", poll_end_flag, work_done, budget); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index df09b1cb742..6407d0d77e8 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) qdev->req_q_size = (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + /* The barrier is required to ensure request and response queue + * addr writes to the registers. + */ + wmb(); + qdev->req_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->req_q_size, @@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) return -ENOMEM; } - qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); - qdev->rsp_q_virt_addr = pci_alloc_consistent(qdev->pdev, (size_t) qdev->rsp_q_size, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 473ce134ca6..24ad17ec7fc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->netdev = netdev; adapter->pdev = pdev; - if (qlcnic_alloc_adapter_resources(adapter)) + err = qlcnic_alloc_adapter_resources(adapter); + if (err) goto err_out_free_netdev; adapter->dev_rst_time = jiffies; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 995d0cfc4c0..1c818254b7b 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -563,7 +563,7 @@ rx_next: if (cpr16(IntrStatus) & cp_rx_intr_mask) goto rx_status_loop; - napi_gro_flush(napi); + napi_gro_flush(napi, false); spin_lock_irqsave(&cp->lock, flags); __napi_complete(napi); cpw16_f(IntrMask, cp_intr_mask); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bad8f2eec9b..c8bfea0524d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!rtsu) { dev_err(&pdev->dev, "Not found TSU resource\n"); + ret = -ENODEV; goto out_release; } mdp->tsu_addr = ioremap(rtsu->start, diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 5b3dd028ce8..0767043f44a 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) evt = list_entry(cursor, struct efx_ptp_event_rx, link); if (time_after(jiffies, evt->expiry)) { - list_del(&evt->link); - list_add(&evt->link, &ptp->evt_free_list); + list_move(&evt->link, &ptp->evt_free_list); netif_warn(efx, hw, efx->net_dev, "PTP rx event dropped\n"); } @@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, match->state = PTP_PACKET_STATE_MATCHED; rc = PTP_PACKET_STATE_MATCHED; - list_del(&evt->link); - list_add(&evt->link, &ptp->evt_free_list); + list_move(&evt->link, &ptp->evt_free_list); break; } } @@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx) /* Drop any pending receive events */ spin_lock_bh(&efx->ptp_data->evt_lock); list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { - list_del(cursor); - list_add(cursor, 
&efx->ptp_data->evt_free_list); + list_move(cursor, &efx->ptp_data->evt_free_list); } spin_unlock_bh(&efx->ptp_data->evt_lock); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 203d9c6ec23..fb9f6b38511 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, /* IO region. */ ioaddr = pci_iomap(pci_dev, 0, 0); - if (!ioaddr) + if (!ioaddr) { + ret = -ENOMEM; goto err_out_cleardev; + } sis_priv = netdev_priv(net_dev); sis_priv->ioaddr = ioaddr; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e872e1da313..7d51a65ab09 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -50,7 +50,6 @@ struct stmmac_priv { unsigned int dirty_rx; struct sk_buff **rx_skbuff; dma_addr_t *rx_skbuff_dma; - struct sk_buff_head rx_recycle; struct net_device *dev; dma_addr_t dma_rx_phy; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3be88331d17..c6cdbc4eb05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv) priv->hw->ring->clean_desc3(p); if (likely(skb != NULL)) { - /* - * If there's room in the queue (limit it to size) - * we add this skb back into the pool, - * if it's the right size. - */ - if ((skb_queue_len(&priv->rx_recycle) < - priv->dma_rx_size) && - skb_recycle_check(skb, priv->dma_buf_sz)) - __skb_queue_head(&priv->rx_recycle, skb); - else - dev_kfree_skb(skb); - + dev_kfree_skb(skb); priv->tx_skbuff[entry] = NULL; } @@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev) priv->eee_enabled = stmmac_eee_init(priv); napi_enable(&priv->napi); - skb_queue_head_init(&priv->rx_recycle); netif_start_queue(dev); return 0; @@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev) kfree(priv->tm); #endif napi_disable(&priv->napi); - skb_queue_purge(&priv->rx_recycle); /* Free the IRQ lines */ free_irq(dev->irq, dev); @@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) if (likely(priv->rx_skbuff[entry] == NULL)) { struct sk_buff *skb; - skb = __skb_dequeue(&priv->rx_recycle); - if (skb == NULL) - skb = netdev_alloc_skb_ip_align(priv->dev, - bfsize); + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); if (unlikely(skb == NULL)) break; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8419bf385e0..275b430aeb7 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); + err = -ENODEV; goto err_out_free_res; } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 9ae12d0c963..6c8695ec7cb 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev, goto err_out_iounmap; } - if (gem_get_device_address(gp)) + err = gem_get_device_address(gp); + if (err) goto err_out_free_consistent; dev->netdev_ops = &gem_netdev_ops; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index b26cbda5efa..2c41894d547 
100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3)) + depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX)) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 4e2a1628484..4e981001385 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb) { struct skb_shared_info *sh = skb_shinfo(skb); unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ long n; /* size of the current piece of payload */ int num_edescs = 0; int segment; @@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb) /* Advance as needed. */ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; + f_size = skb_frag_size(&sh->frags[f_id]); f_used = 0; } @@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, struct iphdr *ih; struct tcphdr *th; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned char *data = skb->data; unsigned int ih_off, th_off, p_len; unsigned int isum_seed, tsum_seed, id, seq; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ long n; /* size of the current piece of payload */ int segment; @@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, isum_seed = ((0xFFFF - ih->check) + (0xFFFF - ih->tot_len) + (0xFFFF - ih->id)); - tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); + tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); id = ntohs(ih->id); seq = ntohl(th->seq); @@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, /* Advance as needed. 
*/ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; + f_size = skb_frag_size(&sh->frags[f_id]); f_used = 0; } @@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, struct tile_net_priv *priv = netdev_priv(dev); struct skb_shared_info *sh = skb_shinfo(skb); unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; + unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; gxio_mpipe_edesc_t edesc_head = { { 0 } }; gxio_mpipe_edesc_t edesc_body = { { 0 } }; long f_id = -1; /* id of the current fragment */ - long f_size = skb->hdr_len; /* size of the current fragment */ - long f_used = sh_len; /* bytes used from the current fragment */ - void *f_data = skb->data; + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + void *f_data = skb->data + sh_len; long n; /* size of the current piece of payload */ unsigned long tx_packets = 0, tx_bytes = 0; unsigned int csum_start; @@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, /* Egress the payload. */ while (p_used < p_len) { + void *va; /* Advance as needed. */ while (f_used >= f_size) { f_id++; - f_size = sh->frags[f_id].size; - f_used = 0; + f_size = skb_frag_size(&sh->frags[f_id]); f_data = tile_net_frag_buf(&sh->frags[f_id]); + f_used = 0; } + va = f_data + f_used; + /* Use bytes from the current fragment. */ n = p_len - p_used; if (n > f_size - f_used) @@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, p_used += n; /* Egress a piece of the payload. */ - edesc_body.va = va_to_tile_io_addr(f_data) + f_used; + edesc_body.va = va_to_tile_io_addr(va); edesc_body.xfer_size = n; edesc_body.bound = !(p_used < p_len); gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
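
Editorial note: a recurring fix across the probe/up hunks above (amd8111e, au1000_eth, qlcnic, sh_eth, sis900, niu, sungem, pch_gbe_up) is making sure err holds a real negative errno before jumping to an unwind label, and unwinding earlier setup steps in reverse order. The sketch below is a minimal userspace illustration of that idiom only, not driver code; request_irq_stub(), alloc_rx_pool_stub(), free_irq_stub() and valid_mac() are hypothetical stand-ins for the actual driver calls.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's probe/up steps. */
static bool valid_mac(void)          { return true; }
static int request_irq_stub(void)    { return 0; }
static int alloc_rx_pool_stub(void)  { return -ENOMEM; }
static void free_irq_stub(void)      { puts("unwind: free irq"); }

/*
 * The idiom: err is preloaded (or assigned from each call) before any
 * goto, and the labels unwind earlier successes in reverse order.
 */
static int device_up(void)
{
	int err = -EINVAL;

	if (!valid_mac())
		goto out;

	err = request_irq_stub();
	if (err)
		goto out;

	err = alloc_rx_pool_stub();
	if (err)
		goto free_irq;

	return 0;

free_irq:
	free_irq_stub();
out:
	return err;
}

int main(void)
{
	printf("device_up() returned %d\n", device_up());
	return 0;
}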
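
Editorial note: the sfc/ptp.c hunks replace each back-to-back list_del()/list_add() pair with a single list_move(), which in the kernel's list API is simply an unlink followed by an add at the destination head. The following self-contained sketch re-implements a minimal list_head and list_move() in userspace C to show that equivalence; it illustrates the semantics and is not the kernel's actual <linux/list.h> implementation.

#include <stdio.h>

/* Minimal userspace stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* list_move(): unlink from the current list, relink at the new head. */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

struct evt {
	int id;
	struct list_head link;
};

int main(void)
{
	struct list_head evt_list, evt_free_list;
	struct evt e = { .id = 1 };

	list_init(&evt_list);
	list_init(&evt_free_list);
	list_add(&e.link, &evt_list);

	/* Equivalent to the removed list_del() + list_add() pair. */
	list_move(&e.link, &evt_free_list);

	printf("evt_list empty: %d, event now on free list: %d\n",
	       evt_list.next == &evt_list, evt_free_list.next == &e.link);
	return 0;
}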