Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 887 |
1 file changed, 605 insertions, 282 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b..6e6db955b4a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.102"
-#define DRV_MODULE_RELDATE	"September 1, 2009"
+#define DRV_MODULE_VERSION	"3.104"
+#define DRV_MODULE_RELDATE	"November 13, 2009"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -137,6 +137,12 @@
 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
+#define TG3_RX_STD_BUFF_RING_SIZE \
+	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+
+#define TG3_RX_JMB_BUFF_RING_SIZE \
+	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
 
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
 				       TG3_64BIT_REG_LOW, val);
 		return;
 	}
-	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+	if (off == TG3_RX_STD_PROD_IDX_REG) {
 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
 				       TG3_64BIT_REG_LOW, val);
 		return;
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 	u32 val;
 	struct phy_device *phydev;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
 		val = MAC_PHYCFG2_50610_LED_MODES;
 		break;
 	case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp)
 		if (is_serdes)
 			tp->phy_addr += 7;
 	} else
-		tp->phy_addr = PHY_ADDR;
+		tp->phy_addr = TG3_PHY_MII_ADDR;
 
 	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	tp->mdio_bus->read     = &tg3_mdio_read;
 	tp->mdio_bus->write    = &tg3_mdio_write;
 	tp->mdio_bus->reset    = &tg3_mdio_reset;
-	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
+	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 		return i;
 	}
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (!phydev || !phydev->drv) {
 		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp)
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM57780:
 		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		break;
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	case TG3_PHY_ID_RTL8201E:
 	case TG3_PHY_ID_BCMAC131:
 		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
 		break;
 	}
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 	u32 old_tx_mode = tp->tx_mode;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
-		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
+		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev)
 	u8 oldflowctrl, linkmesg = 0;
 	u32 mac_mode, lcl_adv, rmt_adv;
 	struct tg3 *tp = netdev_priv(dev);
-	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	spin_lock_bh(&tp->lock);
 
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev)
 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 			mac_mode |= MAC_MODE_PORT_MODE_MII;
-		else
+		else if (phydev->speed == SPEED_1000 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
 
 		if (phydev->duplex == DUPLEX_HALF)
 			mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp)
 	/* Bring the PHY back to a known state. */
 	tg3_bmcr_reset(tp);
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	/* Attach the MAC to the PHY. */
 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp)
 				      SUPPORTED_Asym_Pause);
 		break;
 	default:
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		return -EINVAL;
 	}
 
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (tp->link_config.phy_is_low_power) {
 		tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp)
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
+	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
 	}
 }
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 		udelay(40);
 		return;
+	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		u32 phytest;
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+			u32 phy;
+
+			tg3_writephy(tp, MII_ADVERTISE, 0);
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     phytest | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+				tg3_writephy(tp,
+					     MII_TG3_FET_SHDW_AUXMODE4,
+					     phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+		}
+		return;
 	} else if (do_low_power) {
 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
 static void tg3_enable_nvram_access(struct tg3 *tp)
 {
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 		u32 nvaccess = tr32(NVRAM_ACCESS);
 
 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2229,7 +2269,7 @@
 static void tg3_disable_nvram_access(struct tg3 *tp)
 {
 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
 		u32 nvaccess = tr32(NVRAM_ACCESS);
 
 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 			struct phy_device *phydev;
 			u32 phyid, advertising;
 
-			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 			tp->link_config.phy_is_low_power = 1;
 
@@ -3243,15 +3283,6 @@ relink:
 			pci_write_config_word(tp->pdev,
 					      tp->pcie_cap + PCI_EXP_LNKCTL,
 					      newlnkctl);
-	} else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
-		if (tp->link_config.active_speed == SPEED_100 ||
-		    tp->link_config.active_speed == SPEED_10)
-			newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		else
-			newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		if (newreg != oldreg)
-			tw32(TG3_PCIE_LNKCTL, newreg);
 	}
 
 	if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4375,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	}
 }
 
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+	if (!ri->skb)
+		return;
+
+	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+			 map_sz, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(ri->skb);
+	ri->skb = NULL;
+}
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
@@ -4386,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
-			    int src_idx, u32 dest_idx_unmasked)
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+			    u32 opaque_key, u32 dest_idx_unmasked)
 {
-	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map, *src_map;
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int skb_size, dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	src_map = NULL;
 	switch (opaque_key) {
@@ -4403,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
-		if (src_idx >= 0)
-			src_map = &tpr->rx_std_buffers[src_idx];
 		skb_size = tp->rx_pkt_map_sz;
 		break;
 
@@ -4412,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
-		if (src_idx >= 0)
-			src_map = &tpr->rx_jmb_buffers[src_idx];
 		skb_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
@@ -4435,13 +4471,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 				 PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	map->skb = skb;
 	pci_unmap_addr_set(map, mapping, mapping);
 
-	if (src_map != NULL)
-		src_map->skb = NULL;
-
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
 
@@ -4452,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
-			   int src_idx, u32 dest_idx_unmasked)
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+			   struct tg3_rx_prodring_set *dpr,
+			   u32 opaque_key, int src_idx,
+			   u32 dest_idx_unmasked)
 {
 	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		dest_desc = &tpr->rx_std[dest_idx];
-		dest_map = &tpr->rx_std_buffers[dest_idx];
-		src_desc = &tpr->rx_std[src_idx];
-		src_map = &tpr->rx_std_buffers[src_idx];
+		dest_desc = &dpr->rx_std[dest_idx];
+		dest_map = &dpr->rx_std_buffers[dest_idx];
+		src_desc = &spr->rx_std[src_idx];
+		src_map = &spr->rx_std_buffers[src_idx];
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		dest_desc = &tpr->rx_jmb[dest_idx].std;
-		dest_map = &tpr->rx_jmb_buffers[dest_idx];
-		src_desc = &tpr->rx_jmb[src_idx].std;
-		src_map = &tpr->rx_jmb_buffers[src_idx];
+		dest_desc = &dpr->rx_jmb[dest_idx].std;
+		dest_map = &dpr->rx_jmb_buffers[dest_idx];
+		src_desc = &spr->rx_jmb[src_idx].std;
+		src_map = &spr->rx_jmb_buffers[src_idx];
 		break;
 
 	default:
@@ -4487,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
 			   pci_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
 	dest_desc->addr_lo = src_desc->addr_lo;
-	src_map->skb = NULL;
 }
 
@@ -4519,10 +4557,11 @@
 {
 	struct tg3 *tp = tnapi->tp;
 	u32 work_mask, rx_std_posted = 0;
+	u32 std_prod_idx, jmb_prod_idx;
 	u32 sw_idx = tnapi->rx_rcb_ptr;
 	u16 hw_idx;
 	int received;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
 	hw_idx = *(tnapi->rx_rcb_prod_idx);
 	/*
@@ -4532,7 +4571,10 @@
 	rmb();
 	work_mask = 0;
 	received = 0;
+	std_prod_idx = tpr->rx_std_prod_idx;
+	jmb_prod_idx = tpr->rx_jmb_prod_idx;
 	while (sw_idx != hw_idx && budget > 0) {
+		struct ring_info *ri;
 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 		unsigned int len;
 		struct sk_buff *skb;
@@ -4542,16 +4584,16 @@
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
-			post_ptr = &tpr->rx_std_ptr;
+			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
 			dma_addr = pci_unmap_addr(ri, mapping);
 			skb = ri->skb;
-			post_ptr = &tpr->rx_jmb_ptr;
+			post_ptr = &jmb_prod_idx;
 		} else
 			goto next_pkt_nopost;
 
@@ -4560,7 +4602,7 @@
 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 		drop_it:
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 		drop_it_no_recycle:
 			/* Other statistics kept track of by card. */
@@ -4580,11 +4622,13 @@
 		    ) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
-						    desc_idx, *post_ptr);
+			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
 
+			ri->skb = NULL;
+
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
@@ -4592,7 +4636,7 @@
 		} else {
 			struct sk_buff *copy_skb;
 
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
 			copy_skb = netdev_alloc_skb(tp->dev,
@@ -4643,9 +4687,7 @@ next_pkt:
 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 			u32 idx = *post_ptr % TG3_RX_RING_SIZE;
-
-			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
-				     TG3_64BIT_REG_LOW, idx);
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
 			work_mask &= ~RXD_OPAQUE_RING_STD;
 			rx_std_posted = 0;
 		}
@@ -4665,33 +4707,45 @@ next_pkt_nopost:
 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
 	/* Refill RX ring(s). */
-	if (work_mask & RXD_OPAQUE_RING_STD) {
-		sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
-		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-			     sw_idx);
-	}
-	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-		sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
-		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-			     sw_idx);
+	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+		if (work_mask & RXD_OPAQUE_RING_STD) {
+			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+		}
+		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+			tpr->rx_jmb_prod_idx = jmb_prod_idx %
+					       TG3_RX_JUMBO_RING_SIZE;
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     tpr->rx_jmb_prod_idx);
+		}
+		mmiowb();
+	} else if (work_mask) {
+		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+		 * updated before the producer indices can be updated.
+		 */
+		smp_wmb();
+
+		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+
+		napi_schedule(&tp->napi[1].napi);
 	}
-	mmiowb();
 
 	return received;
 }
 
-static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+static void tg3_poll_link(struct tg3 *tp)
 {
-	struct tg3 *tp = tnapi->tp;
-	struct tg3_hw_status *sblk = tnapi->hw_status;
-
 	/* handle link change and other phy events */
 	if (!(tp->tg3_flags &
 	      (TG3_FLAG_USE_LINKCHG_REG |
 	       TG3_FLAG_POLL_SERDES))) {
+		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
 		if (sblk->status & SD_STATUS_LINK_CHG) {
 			sblk->status = SD_STATUS_UPDATED |
-				(sblk->status & ~SD_STATUS_LINK_CHG);
+				       (sblk->status & ~SD_STATUS_LINK_CHG);
 			spin_lock(&tp->lock);
 			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 				tw32_f(MAC_STATUS,
@@ -4705,6 +4759,98 @@
 			spin_unlock(&tp->lock);
 		}
 	}
+}
+
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *dpr,
+				 struct tg3_rx_prodring_set *spr)
+{
+	u32 si, di, cpycnt, src_prod_idx;
+	int i;
+
+	while (1) {
+		src_prod_idx = spr->rx_std_prod_idx;
+
+		/* Make sure updates to the rx_std_buffers[] entries and the
+		 * standard producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_std_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_std_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+		else
+			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+		si = spr->rx_std_cons_idx;
+		di = dpr->rx_std_prod_idx;
+
+		memcpy(&dpr->rx_std_buffers[di],
+		       &spr->rx_std_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_std[si];
+			dbd = &dpr->rx_std[di];
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+				       TG3_RX_RING_SIZE;
+	}
+
+	while (1) {
+		src_prod_idx = spr->rx_jmb_prod_idx;
+
+		/* Make sure updates to the rx_jmb_buffers[] entries and
+		 * the jumbo producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_jmb_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_jmb_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+		else
+			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+		si = spr->rx_jmb_cons_idx;
+		di = dpr->rx_jmb_prod_idx;
+
+		memcpy(&dpr->rx_jmb_buffers[di],
+		       &spr->rx_jmb_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_jmb[si].std;
+			dbd = &dpr->rx_jmb[di].std;
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+				       TG3_RX_JUMBO_RING_SIZE;
+	}
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
 
 	/* run TX completion thread */
 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4720,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 		work_done += tg3_rx(tnapi, budget - work_done);
 
+	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+		int i;
+		u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+		u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+		for (i = 2; i < tp->irq_cnt; i++)
+			tg3_rx_prodring_xfer(tp, tnapi->prodring,
+					     tp->napi[i].prodring);
+
+		wmb();
+
+		if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+			u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+		}
+
+		if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+			u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+			tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+		}
+
+		mmiowb();
+	}
+
+	return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		/* tp->last_tag is used in tg3_restart_ints() below
+		 * to tell the hw how much work has been processed,
+		 * so we must read it before checking for more work.
+		 */
+		tnapi->last_tag = sblk->status_tag;
+		tnapi->last_irq_tag = tnapi->last_tag;
+		rmb();
+
+		/* check for RX/TX work to do */
+		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+			napi_complete(napi);
+			/* Reenable interrupts. */
+			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+			mmiowb();
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	schedule_work(&tp->reset_task);
 	return work_done;
 }
 
@@ -4731,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
 	struct tg3_hw_status *sblk = tnapi->hw_status;
 
 	while (1) {
+		tg3_poll_link(tp);
+
 		work_done = tg3_poll_work(tnapi, work_done, budget);
 
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5093,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
-static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-				       u32 last_plus_one, u32 *start,
-				       u32 base_flags, u32 mss)
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
 {
-	struct tg3_napi *tnapi = &tp->napi[0];
+	struct tg3 *tp = tnapi->tp;
 	struct sk_buff *new_skb;
 	dma_addr_t new_addr = 0;
 	u32 entry = *start;
@@ -5124,7 +5340,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	/* Make sure new skb does not cross any 4G boundaries.
 	 * Drop the packet if it does.
 	 */
-	if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+	if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+		    tg3_4g_overflow_test(new_addr, new_skb->len))) {
 		if (!ret)
 			skb_dma_unmap(&tp->pdev->dev, new_skb,
 				      DMA_TO_DEVICE);
@@ -5179,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
 }
 
 /* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLG2_HW_TSO_2 only.
+ * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 */
 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev)
@@ -5238,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 			hdrlen = ip_tcp_len + tcp_opt_len;
 		}
 
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
 			mss |= (hdrlen & 0xc) << 12;
 			if (hdrlen & 0x10)
 				base_flags |= 0x00000010;
@@ -5365,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	struct skb_shared_info *sp;
 	int would_hit_hwbug;
 	dma_addr_t mapping;
-	struct tg3_napi *tnapi = &tp->napi[0];
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
 
-	len = skb_headlen(skb);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+		tnapi++;
 
 	/* We are running in BH disabled context with netif_tx_lock
 	 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5375,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
 	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5389,10 +5610,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	base_flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-	mss = 0;
+
 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
 		struct iphdr *iph;
-		int tcp_opt_len, ip_tcp_len, hdr_len;
+		u32 tcp_opt_len, ip_tcp_len, hdr_len;
 
 		if (skb_header_cloned(skb) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5644,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 						 IPPROTO_TCP,
 						 0);
 
-		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
-		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+			mss |= hdr_len << 9;
+		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 			if (tcp_opt_len || iph->ihl > 5) {
 				int tsflags;
 
@@ -5446,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+	    !mss && skb->len > ETH_DATA_LEN)
+		base_flags |= TXD_FLAG_JMB_PKT;
+
 	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
@@ -5459,9 +5691,20 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
 	would_hit_hwbug = 0;
 
-	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+	len = skb_headlen(skb);
+
+	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
-	else if (tg3_4g_overflow_test(mapping, len))
+
+	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+	    tg3_4g_overflow_test(mapping, len))
+		would_hit_hwbug = 1;
+
+	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+	    tg3_40bit_overflow_test(tp, mapping, len))
+		would_hit_hwbug = 1;
+
+	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
 		would_hit_hwbug = 1;
 
 	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5725,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
 			tnapi->tx_buffers[entry].skb = NULL;
 
-			if (tg3_4g_overflow_test(mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+			    len <= 8)
 				would_hit_hwbug = 1;
 
-			if (tg3_40bit_overflow_test(tp, mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			    tg3_4g_overflow_test(mapping, len))
+				would_hit_hwbug = 1;
+
+			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+			    tg3_40bit_overflow_test(tp, mapping, len))
 				would_hit_hwbug = 1;
 
 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -5509,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
+		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
 						&start, base_flags, mss))
 			goto out_unlock;
 
@@ -5517,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	}
 
 	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
+	tw32_tx_mbox(tnapi->prodmbox, entry);
 
 	tnapi->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_wake_queue(tp->dev);
+			netif_tx_wake_queue(txq);
 	}
 
 out_unlock:
@@ -5594,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
 	int i;
-	struct ring_info *rxp;
-
-	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-		rxp = &tpr->rx_std_buffers[i];
 
-		if (rxp->skb == NULL)
-			continue;
+	if (tpr != &tp->prodring[0]) {
+		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+		     i = (i + 1) % TG3_RX_RING_SIZE)
+			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+					tp->rx_pkt_map_sz);
+
+		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+			for (i = tpr->rx_jmb_cons_idx;
+			     i != tpr->rx_jmb_prod_idx;
+			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+						TG3_RX_JMB_MAP_SZ);
+			}
+		}
 
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(rxp, mapping),
-				 tp->rx_pkt_map_sz,
-				 PCI_DMA_FROMDEVICE);
-		dev_kfree_skb_any(rxp->skb);
-		rxp->skb = NULL;
+		return;
 	}
 
-	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-			rxp = &tpr->rx_jmb_buffers[i];
+	for (i = 0; i < TG3_RX_RING_SIZE; i++)
+		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+				tp->rx_pkt_map_sz);
 
-			if (rxp->skb == NULL)
-				continue;
-
-			pci_unmap_single(tp->pdev,
-					 pci_unmap_addr(rxp, mapping),
-					 TG3_RX_JMB_MAP_SZ,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_any(rxp->skb);
-			rxp->skb = NULL;
-		}
+	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+					TG3_RX_JMB_MAP_SZ);
 	}
 }
 
@@ -5638,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
 	u32 i, rx_pkt_dma_sz;
-	struct tg3_napi *tnapi = &tp->napi[0];
+
+	tpr->rx_std_cons_idx = 0;
+	tpr->rx_std_prod_idx = 0;
+	tpr->rx_jmb_cons_idx = 0;
+	tpr->rx_jmb_prod_idx = 0;
+
+	if (tpr != &tp->prodring[0]) {
+		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+			memset(&tpr->rx_jmb_buffers[0], 0,
+			       TG3_RX_JMB_BUFF_RING_SIZE);
+		goto done;
+	}
 
 	/* Zero out all descriptors. */
 	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5665,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX standard ring, "
 			       "only %d out of %d buffers were allocated "
@@ -5696,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
-				     -1, i) < 0) {
+		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
+				     i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX jumbo ring, "
 			       "only %d out of %d buffers were "
@@ -5741,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
 				struct tg3_rx_prodring_set *tpr)
 {
-	tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
-				      TG3_RX_RING_SIZE, GFP_KERNEL);
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
 	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
@@ -5752,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
 		goto err_out;
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
-					      TG3_RX_JUMBO_RING_SIZE,
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
 					      GFP_KERNEL);
 		if (!tpr->rx_jmb_buffers)
 			goto err_out;
@@ -5809,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
 
 			dev_kfree_skb_any(skb);
 		}
-	}
 
-	tg3_rx_prodring_free(tp, &tp->prodring[0]);
+		if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
+			tg3_rx_prodring_free(tp, &tp->prodring[j]);
+	}
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5845,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
 		tnapi->rx_rcb_ptr = 0;
 		if (tnapi->rx_rcb)
 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+		if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
+		    tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+			return -ENOMEM;
 	}
 
-	return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+	return 0;
 }
 
 /*
@@ -5891,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
 		tp->hw_stats = NULL;
 	}
 
-	tg3_rx_prodring_fini(tp, &tp->prodring[0]);
+	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+		tg3_rx_prodring_fini(tp, &tp->prodring[i]);
 }
 
 /*
@@ -5902,8 +6164,10 @@
 static int tg3_alloc_consistent(struct tg3 *tp)
 {
 	int i;
 
-	if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
-		return -ENOMEM;
+	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+		if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
+			goto err_out;
+	}
 
 	tp->hw_stats = pci_alloc_consistent(tp->pdev,
 					    sizeof(struct tg3_hw_stats),
@@ -5947,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 			break;
 		}
 
+		if (tp->irq_cnt == 1)
+			tnapi->prodring = &tp->prodring[0];
+		else if (i)
+			tnapi->prodring = &tp->prodring[i - 1];
+
 		/*
 		 * If multivector RSS is enabled, vector 0 does not handle
 		 * rx or tx interrupts.  Don't allocate any resources for it.
 		 */
@@ -6580,6 +6849,30 @@ static int tg3_chip_reset(struct tg3 *tp)
 
 	tg3_mdio_start(tp);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		u8 phy_addr;
+
+		phy_addr = tp->phy_addr;
+		tp->phy_addr = TG3_PHY_PCIE_ADDR;
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
+		      TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
+		      TG3_PCIEPHY_TX0CTRL1_NB_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
+		udelay(10);
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
+		      TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
+		udelay(10);
+
+		tp->phy_addr = phy_addr;
+	}
+
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7455,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 
 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
-	}
 
-	if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		val = tr32(TG3_PCIE_LNKCTL);
-		if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
-			val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		else
-			val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		tw32(TG3_PCIE_LNKCTL, val);
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 	}
 
 	/* This works around an issue with Athlon chipsets on
@@ -7217,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (err)
 		return err;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		val = tr32(TG3PCI_DMA_RW_CTRL) &
+		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
 		/* This value is determined during the probe time DMA
 		 * engine test, tg3_test_dma.
 		 */
@@ -7342,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	     ((u64) tpr->rx_std_mapping >> 32));
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
-	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
-	     NIC_SRAM_RX_BUFFER_DESC);
+	if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+		     NIC_SRAM_RX_BUFFER_DESC);
 
 	/* Disable the mini ring */
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7366,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 		     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
 		     BDINFO_FLAGS_USE_EXT_RECV);
-		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
-		     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 	} else {
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 		     BDINFO_FLAGS_DISABLED);
@@ -7383,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 
-	tpr->rx_std_ptr = tp->rx_pending;
-	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tpr->rx_std_ptr);
+	tpr->rx_std_prod_idx = tp->rx_pending;
+	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 
-	tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+	tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
 			  tp->rx_jumbo_pending : 0;
-	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-		     tpr->rx_jmb_ptr);
+	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
 		tw32(STD_REPLENISH_LWM, 32);
@@ -7453,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 
@@ -7602,6 +7893,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 		val |= WDMAC_MODE_STATUS_TAG_FIX;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		val |= WDMAC_MODE_BURST_ALL_DATA;
+
 	tw32_f(WDMAC_MODE, val);
 	udelay(40);
 
@@ -9240,9 +9534,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_gset(phydev, cmd);
 	}
 
 	cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9577,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_sset(phydev, cmd);
 	}
 
 	if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9436,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
 		return 0;
 	}
 	if ((dev->features & NETIF_F_IPV6_CSUM) &&
-	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
+	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
+	     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
 		if (value) {
 			dev->features |= NETIF_F_TSO6;
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+			if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 				dev->features |= NETIF_F_TSO_ECN;
 		} else
 			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -9466,7 +9765,7 @@ static int tg3_nway_reset(struct net_device *dev)
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
+		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 	} else {
 		u32 bmcr;
 
@@ -9585,7 +9884,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 		u32 newadv;
 		struct phy_device *phydev;
 
-		phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 		if (epause->rx_pause) {
 			if (epause->tx_pause)
@@ -10338,7 +10637,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	for (i = 14; i < tx_len; i++)
 		tx_data[i] = (u8) (i & 0xff);
 
-	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 	       rnapi->coal_now);
@@ -10349,7 +10651,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	num_pkts = 0;
 
-	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
+	tg3_set_txd(tnapi, tnapi->tx_prod,
+		    skb_shinfo(skb)->dma_head, tx_len, 0, 1);
 
 	tnapi->tx_prod++;
 	num_pkts++;
@@ -10359,8 +10662,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	udelay(10);
 
-	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
-	for (i = 0; i < 25; i++) {
+	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
+	for (i = 0; i < 35; i++) {
 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 		       coal_now);
 
@@ -10373,7 +10676,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 		break;
 	}
 
-	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)
@@ -10565,9 +10868,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	int err;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_mii_ioctl(phydev, data, cmd);
 	}
 
 	switch(cmd) {
@@ -10887,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27))
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 
 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -10928,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27)) {
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 		protect = 1;
 	}
 
@@ -11022,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
 
 	/* NVRAM protection for TPM */
 	if (nvcfg1 & (1 << 27)) {
-		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
 		protect = 1;
 	}
 
@@ -11524,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 		tg3_enable_nvram_access(tp);
 
 		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
+		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
 			tw32(NVRAM_WRITE1, 0x406);
 
 		grc_mode = tr32(GRC_MODE);
@@ -12400,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
 		u32 prod_id_asic_rev;
 
-		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
-		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
 			pci_read_config_dword(tp->pdev,
 					      TG3PCI_GEN2_PRODID_ASICREV,
 					      &prod_id_asic_rev);
@@ -12586,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		tp->dev->features |= NETIF_F_IPV6_CSUM;
 	}
 
+	/* Determine TSO capabilities */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
+	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
+	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+			tp->fw_needed = FIRMWARE_TG3TSO5;
+		else
+			tp->fw_needed = FIRMWARE_TG3TSO;
+	}
+
+	tp->irq_max = 1;
+
 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12597,25 +12924,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
 		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
-		} else {
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
-			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
-			    ASIC_REV_5750 &&
-			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
-				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
 		}
-	}
 
-	tp->irq_max = 1;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
+			tp->irq_max = TG3_IRQ_MAX_VECS;
+		}
+	}
 
-#ifdef TG3_NAPI
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
-		tp->irq_max = TG3_IRQ_MAX_VECS;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
+	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
+		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
+		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
 	}
-#endif
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
@@ -12926,11 +13250,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
 
-	if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
-	     tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
-		tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
-
 	err = tg3_mdio_init(tp);
 	if (err)
 		return err;
@@ -13220,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		goto out;
+	}
+
 	if (!goal)
 		goto out;
 
@@ -13414,7 +13738,7 @@
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
 	dma_addr_t buf_dma;
 	u32 *buf, saved_dma_rwctrl;
-	int ret;
+	int ret = 0;
 
 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
 	if (!buf) {
@@ -13427,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 
 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		goto out;
+
 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
 		/* DMA read watermark not used on PCIE */
 		tp->dma_rwctrl |= 0x00180000;
@@ -13499,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	tg3_switch_clocks(tp);
 #endif
 
-	ret = 0;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 		goto out;
@@ -13678,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
 	case PHY_ID_BCM5756:	return "5722/5756";
 	case PHY_ID_BCM5906:	return "5906";
 	case PHY_ID_BCM5761:	return "5761";
+	case PHY_ID_BCM5717:	return "5717";
 	case PHY_ID_BCM8002:	return "8002/serdes";
 	case 0:			return "serdes";
 	default:		return "unknown";
@@ -13919,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 
-	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
-	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
-	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
-	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
-		struct tg3_napi *tnapi = &tp->napi[i];
-
-		tnapi->tp = tp;
-		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
-
-		tnapi->int_mbox = intmbx;
-		if (i < 4)
-			intmbx += 0x8;
-		else
-			intmbx += 0x4;
-
-		tnapi->consmbox = rcvmbx;
-		tnapi->prodmbox = sndmbx;
-
-		if (i)
-			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
-		else
-			tnapi->coal_now = HOSTCC_MODE_NOW;
-
-		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
-			break;
-
-		/*
-		 * If we support MSIX, we'll be using RSS.  If we're using
-		 * RSS, the first vector only handles link interrupts and the
-		 * remaining vectors handle rx and tx interrupts.  Reuse the
-		 * mailbox values for the next iteration.  The values we setup
-		 * above are still useful for the single vectored mode.
-		 */
-		if (!i)
-			continue;
-
-		rcvmbx += 0x8;
-
-		if (sndmbx & 0x4)
-			sndmbx -= 0x4;
-		else
-			sndmbx += 0xc;
-	}
-
-	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
 	dev->ethtool_ops = &tg3_ethtool_ops;
 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
 	dev->irq = pdev->irq;
@@ -13975,8 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
-	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
 		dev->netdev_ops = &tg3_netdev_ops;
 	else
 		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14023,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
 	tg3_init_bufmgr_config(tp);
 
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
-		tp->fw_needed = FIRMWARE_TG3;
-
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+	/* Selectively allow TSO based on operating conditions */
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
+	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
-	}
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
-	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
-		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
-	} else {
-		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
-			tp->fw_needed = FIRMWARE_TG3TSO5;
-		else
-			tp->fw_needed = FIRMWARE_TG3TSO;
+	else {
+		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
+		tp->fw_needed = NULL;
 	}
 
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+		tp->fw_needed = FIRMWARE_TG3;
+
 	/* TSO is on by default on chips that support hardware TSO.
 	 * Firmware TSO on older chips gives lower performance, so it
 	 * is off by default, but can be enabled using ethtool.
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-		if (dev->features & NETIF_F_IP_CSUM)
-			dev->features |= NETIF_F_TSO;
-		if ((dev->features & NETIF_F_IPV6_CSUM) &&
-		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
+	    (dev->features & NETIF_F_IP_CSUM))
+		dev->features |= NETIF_F_TSO;
+
+	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
+	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
+		if (dev->features & NETIF_F_IPV6_CSUM)
 			dev->features |= NETIF_F_TSO6;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 			dev->features |= NETIF_F_TSO_ECN;
 	}
-
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14113,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
 
+	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->tp = tp;
+		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+		tnapi->int_mbox = intmbx;
+		if (i < 4)
+			intmbx += 0x8;
+		else
+			intmbx += 0x4;
+
+		tnapi->consmbox = rcvmbx;
+		tnapi->prodmbox = sndmbx;
+
+		if (i) {
+			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+			netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
+		} else {
+			tnapi->coal_now = HOSTCC_MODE_NOW;
+			netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
+		}
+
+		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+			break;
+
+		/*
+		 * If we support MSIX, we'll be using RSS.  If we're using
+		 * RSS, the first vector only handles link interrupts and the
+		 * remaining vectors handle rx and tx interrupts.  Reuse the
+		 * mailbox values for the next iteration.  The values we setup
+		 * above are still useful for the single vectored mode.
		 */
+		if (!i)
+			continue;
+
+		rcvmbx += 0x8;
+
+		if (sndmbx & 0x4)
+			sndmbx -= 0x4;
+		else
+			sndmbx += 0xc;
+	}
+
 	tg3_init_coal(tp);
 
 	pci_set_drvdata(pdev, dev);
@@ -14131,13 +14453,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	       tg3_bus_string(tp, str),
 	       dev->dev_addr);
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
+	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+		struct phy_device *phydev;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		printk(KERN_INFO
 		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
-		       tp->dev->name,
-		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
-		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
-	else
+		       tp->dev->name, phydev->drv->name,
+		       dev_name(&phydev->dev));
+	} else
 		printk(KERN_INFO
 		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
 		       tp->dev->name, tg3_phy_string(tp),
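Note on the RSS refill handoff introduced above: it is a lockless single-producer/single-consumer exchange. On a non-zero RSS vector, tg3_rx() fills its rx_std_buffers[]/rx_jmb_buffers[] entries, issues smp_wmb(), and only then publishes the new producer index; tg3_rx_prodring_xfer() on vector 1 reads that producer index, issues smp_rmb(), and only then trusts the entries. A minimal user-space sketch of the same ordering discipline follows; it is illustrative only (names, sizes, and the pthread harness are invented here), with GCC/Clang __atomic fences standing in for the kernel's smp_wmb()/smp_rmb():

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 8	/* illustrative; the driver's rings are far larger */
#define NITEMS    100

static int ring[RING_SIZE];		/* stands in for rx_std_buffers[] */
static unsigned int prod_idx, cons_idx;	/* stands in for the prod/cons indices */

static void *producer(void *arg)
{
	for (int v = 1; v <= NITEMS; v++) {
		unsigned int p = prod_idx;	/* single writer of prod_idx */

		/* spin while the ring is full */
		while ((p + 1) % RING_SIZE ==
		       __atomic_load_n(&cons_idx, __ATOMIC_ACQUIRE))
			;

		ring[p] = v;				/* fill the entry first... */
		__atomic_thread_fence(__ATOMIC_RELEASE);	/* ~smp_wmb() */
		__atomic_store_n(&prod_idx, (p + 1) % RING_SIZE,
				 __ATOMIC_RELAXED);	/* ...then publish the index */
	}
	return NULL;
}

static void *consumer(void *arg)
{
	long sum = 0;
	int seen = 0;

	while (seen < NITEMS) {
		unsigned int p = __atomic_load_n(&prod_idx, __ATOMIC_RELAXED);

		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~smp_rmb() */

		while (cons_idx != p) {
			sum += ring[cons_idx];	/* entry is now safe to read */
			seen++;
			__atomic_store_n(&cons_idx,
					 (cons_idx + 1) % RING_SIZE,
					 __ATOMIC_RELEASE);
		}
	}
	printf("consumed %d items, sum %ld (expect %ld)\n",
	       seen, sum, (long)NITEMS * (NITEMS + 1) / 2);
	return NULL;
}

int main(void)
{
	pthread_t pt, ct;

	pthread_create(&pt, NULL, producer, NULL);
	pthread_create(&ct, NULL, consumer, NULL);
	pthread_join(pt, NULL);
	pthread_join(ct, NULL);
	return 0;
}

Build with something like "gcc -O2 -pthread ring.c". Without the two fences, a weakly ordered CPU may let the consumer observe the advanced index before the entry write, which is exactly the hazard the smp_wmb()/smp_rmb() pair in the patch guards against.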
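A second detail worth calling out is how tg3_rx_prodring_xfer() bounds each memcpy(): it copies only the contiguous run between the source consumer and producer indices, clamped so the write does not wrap the destination ring either, and the modulo arithmetic afterwards advances both indices. The following self-contained sketch pulls that copy-length computation out on its own; the ring size and function name are invented for the illustration and are not part of the driver:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 512	/* illustrative stand-in for TG3_RX_RING_SIZE */

/* Contiguous element count that one memcpy() may move, mirroring the
 * cpycnt computation in tg3_rx_prodring_xfer(): stop at the source
 * producer index, at the source ring's wrap point, or at the
 * destination ring's wrap point, whichever comes first.
 */
static unsigned int copy_run(unsigned int src_cons, unsigned int src_prod,
			     unsigned int dst_prod)
{
	unsigned int cpycnt;

	if (src_cons == src_prod)
		return 0;			/* nothing to transfer */

	if (src_cons < src_prod)
		cpycnt = src_prod - src_cons;	/* no wrap on the source */
	else
		cpycnt = RING_SIZE - src_cons;	/* copy up to the wrap point */

	if (cpycnt > RING_SIZE - dst_prod)	/* min(): don't wrap the dest */
		cpycnt = RING_SIZE - dst_prod;

	return cpycnt;
}

int main(void)
{
	/* Simple in-order case: 10 entries, both rings far from the wrap. */
	assert(copy_run(0, 10, 100) == 10);
	/* Source wrapped: only the tail run up to RING_SIZE is contiguous. */
	assert(copy_run(510, 4, 0) == 2);
	/* Destination near its end clamps the run further. */
	assert(copy_run(0, 10, 508) == 4);
	printf("all copy_run() cases pass\n");
	return 0;
}

Each outer while (1) pass in the driver moves one such run and loops again, so a wrapped transfer simply completes in two iterations.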