Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 1117
1 files changed, 759 insertions, 358 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index e3e380f90f8..c25ba273b74 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -15,7 +15,6 @@ * notice is accompanying it. */ -#include <linux/config.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -69,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.60" -#define DRV_MODULE_RELDATE "June 17, 2006" +#define DRV_MODULE_VERSION "3.66" +#define DRV_MODULE_RELDATE "September 23, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -124,9 +123,6 @@ TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) -#define TX_BUFFS_AVAIL(TP) \ - ((TP)->tx_pending - \ - (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) @@ -153,122 +149,71 @@ module_param(tg3_debug, int, 0); MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); static struct pci_device_id tg3_pci_tbl[] = { - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751, - PCI_ANY_ID, PCI_ANY_ID, 
0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { 0, } + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {} }; MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); -static struct { +static const struct { const char string[ETH_GSTRING_LEN]; } ethtool_stats_keys[TG3_NUM_STATS] = { { "rx_octets" }, @@ -349,7 +294,7 @@ static struct { { "nic_tx_threshold_hit" } }; -static struct { +static const struct { const char string[ETH_GSTRING_LEN]; } 
ethtool_test_keys[TG3_NUM_TEST] = { { "nvram test (online) " }, @@ -367,7 +312,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val) static u32 tg3_read32(struct tg3 *tp, u32 off) { - return (readl(tp->regs + off)); + return (readl(tp->regs + off)); } static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) @@ -483,6 +428,16 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) readl(mbox); } +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) +{ + return (readl(tp->regs + off + GRCMBOX_BASE)); +} + +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off + GRCMBOX_BASE); +} + #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) @@ -498,6 +453,10 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + spin_lock_irqsave(&tp->indirect_lock, flags); if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); @@ -519,6 +478,12 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) { unsigned long flags; + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + spin_lock_irqsave(&tp->indirect_lock, flags); if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); @@ -548,6 +513,9 @@ static inline void tg3_cond_int(struct tg3 *tp) if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && (tp->hw_status->status & SD_STATUS_UPDATED)) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coalesce_mode | + (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); } static void tg3_enable_ints(struct tg3 *tp) @@ -588,7 +556,7 @@ static inline unsigned int tg3_has_work(struct tg3 *tp) /* tg3_restart_ints * similar to tg3_enable_ints, but it accurately determines whether there * is new work pending and can return without flushing the PIO write - * which reenables interrupts + * which reenables interrupts */ static void tg3_restart_ints(struct tg3 *tp) { @@ -677,7 +645,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK); frame_val |= (MI_COM_CMD_READ | MI_COM_START); - + tw32_f(MAC_MI_COM, frame_val); loops = PHY_BUSY_LOOPS; @@ -713,6 +681,10 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) unsigned int loops; int ret; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL)) + return 0; + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { tw32_f(MAC_MI_MODE, (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); @@ -725,7 +697,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) MI_COM_REG_ADDR_MASK); frame_val |= (val & MI_COM_DATA_MASK); frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); - + tw32_f(MAC_MI_COM, frame_val); loops = PHY_BUSY_LOOPS; @@ -1063,6 +1035,24 @@ out: phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 phy_reg; + + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); + + if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) { + u32 phy_reg2; + + 
tg3_writephy(tp, MII_TG3_EPHY_TEST, + phy_reg | MII_TG3_EPHY_SHADOW_EN); + /* Enable auto-MDIX */ + if (!tg3_readphy(tp, 0x10, &phy_reg2)) + tg3_writephy(tp, 0x10, phy_reg2 | 0x4000); + tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg); + } + } + tg3_phy_set_wirespeed(tp); return 0; } @@ -1176,6 +1166,15 @@ static void tg3_nvram_unlock(struct tg3 *); static void tg3_power_down_phy(struct tg3 *tp) { + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); + } + /* The PHY should not be powered down on some chips because * of bugs. */ @@ -1258,7 +1257,12 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tg3_setup_phy(tp, 0); } - if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { int i; u32 val; @@ -1282,7 +1286,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); udelay(40); - mac_mode = MAC_MODE_PORT_MODE_MII; + if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 || !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)) @@ -1360,15 +1367,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) } if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { - /* Turn off the PHY */ - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_FORCE_LED_OFF); - tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); - tg3_power_down_phy(tp); - } - } + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + tg3_power_down_phy(tp); tg3_frob_aux_power(tp); @@ -1481,7 +1481,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv if (old_rx_mode != tp->rx_mode) { tw32_f(MAC_RX_MODE, tp->rx_mode); } - + if (new_tg3_flags & TG3_FLAG_TX_PAUSE) tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; else @@ -1526,6 +1526,13 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 break; default: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : + DUPLEX_HALF; + break; + } *speed = SPEED_INVALID; *duplex = DUPLEX_INVALID; break; @@ -1808,7 +1815,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else + else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) tg3_writephy(tp, MII_TG3_IMASK, ~0); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || @@ -2465,24 +2472,27 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) expected_sg_dig_ctrl |= (1 << 12); if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } +restart_autoneg: if (workaround) tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30)); udelay(5); tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); - tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED; + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; } else if (mac_status & (MAC_STATUS_PCS_SYNCED | MAC_STATUS_SIGNAL_DET)) { - int i; - - /* Giver time to negotiate (~200ms) */ - for (i = 0; i < 40000; i++) { - sg_dig_status = tr32(SG_DIG_STATUS); - if (sg_dig_status & (0x3)) - break; - udelay(5); - } + sg_dig_status = tr32(SG_DIG_STATUS); mac_status = tr32(MAC_STATUS); if ((sg_dig_status & (1 << 1)) && @@ -2498,10 +2508,11 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) tg3_setup_flow_control(tp, local_adv, remote_adv); current_link_up = 1; - tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED; + tp->serdes_counter = 0; + tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; } else if (!(sg_dig_status & (1 << 1))) { - if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) - tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED; + if (tp->serdes_counter) + tp->serdes_counter--; else { if (workaround) { u32 val = serdes_cfg; @@ -2525,9 +2536,17 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) !(mac_status & MAC_STATUS_RCVD_CFG)) { tg3_setup_flow_control(tp, 0, 0); current_link_up = 1; - } + tp->tg3_flags2 |= + TG3_FLG2_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; } } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; } out: @@ -2546,7 +2565,7 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) if (tp->link_config.autoneg == AUTONEG_ENABLE) { u32 flags; int i; - + if (fiber_autoneg(tp, &flags)) { u32 local_adv, remote_adv; @@ -2658,14 +2677,16 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) MAC_STATUS_CFG_CHANGED)); udelay(5); if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)) == 0) + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) break; } mac_status = tr32(MAC_STATUS); if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { current_link_up = 0; - if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); udelay(1); @@ -2770,7 +2791,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) tg3_writephy(tp, MII_BMCR, bmcr); tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED; + 
tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; return err; @@ -2875,9 +2896,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) static void tg3_serdes_parallel_detect(struct tg3 *tp) { - if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) { + if (tp->serdes_counter) { /* Give autoneg time to complete. */ - tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED; + tp->serdes_counter--; return; } if (!netif_carrier_ok(tp->dev) && @@ -2988,6 +3009,13 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } +static inline u32 tg3_tx_avail(struct tg3 *tp) +{ + smp_mb(); + return (tp->tx_pending - + ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); +} + /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. @@ -3039,12 +3067,20 @@ static void tg3_tx(struct tg3 *tp) tp->tx_cons = sw_idx; - if (unlikely(netif_queue_stopped(tp->dev))) { - spin_lock(&tp->tx_lock); + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_queue_stopped(tp->dev) && + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) { + netif_tx_lock(tp->dev); if (netif_queue_stopped(tp->dev) && - (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); + netif_tx_unlock(tp->dev); } } @@ -3098,11 +3134,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. 
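The deleted TX_BUFFS_AVAIL() macro from the top of the patch reappears here as tg3_tx_avail(), now preceded by smp_mb(), and tg3_tx() takes netif_tx_lock() instead of the old tp->tx_lock when it wakes the queue. The availability arithmetic works only because TG3_TX_RING_SIZE is a power of two, so the producer/consumer difference can be reduced with a mask even after the 32-bit indices wrap. A minimal userspace model of that arithmetic (struct and names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 512u                    /* must be a power of two */

struct txring {
	uint32_t prod;                    /* next slot the producer fills  */
	uint32_t cons;                    /* next slot the consumer reaps  */
	uint32_t pending;                 /* descriptors allowed in flight */
};

/* Free descriptors; valid even after prod/cons wrap, because the
 * difference is reduced modulo the power-of-two ring size. */
static uint32_t tx_avail(const struct txring *r)
{
	return r->pending - ((r->prod - r->cons) & (RING_SIZE - 1));
}

int main(void)
{
	struct txring r = { .prod = 510, .cons = 5, .pending = RING_SIZE - 1 };

	assert(tx_avail(&r) == r.pending - 505);
	r.prod = 3;                       /* producer wrapped past the end */
	assert(((r.prod - r.cons) & (RING_SIZE - 1)) == 510);
	return 0;
}

In the patch the same computation sits behind smp_mb() so that a CPU running tg3_start_xmit() observes the tx_cons update published by tg3_tx() before it re-tests netif_queue_stopped(); a single-threaded model can only show the index arithmetic, not that ordering.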
*/ - skb = dev_alloc_skb(skb_size); + skb = netdev_alloc_skb(tp->dev, skb_size); if (skb == NULL) return -ENOMEM; - skb->dev = tp->dev; skb_reserve(skb, tp->rx_offset); mapping = pci_map_single(tp->pdev, skb->data, @@ -3195,7 +3230,7 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) */ static int tg3_rx(struct tg3 *tp, int budget) { - u32 work_mask; + u32 work_mask, rx_std_posted = 0; u32 sw_idx = tp->rx_rcb_ptr; u16 hw_idx; int received; @@ -3222,6 +3257,7 @@ static int tg3_rx(struct tg3 *tp, int budget) mapping); skb = tp->rx_std_buffers[desc_idx].skb; post_ptr = &tp->rx_std_ptr; + rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], mapping); @@ -3247,7 +3283,7 @@ static int tg3_rx(struct tg3 *tp, int budget) len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ - if (len > RX_COPY_THRESHOLD + if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2 /* rx_offset != 2 iff this is a 5701 card running * in PCI-X mode [see tg3_get_invariants()] */ @@ -3270,11 +3306,10 @@ static int tg3_rx(struct tg3 *tp, int budget) tg3_recycle_rx(tp, opaque_key, desc_idx, *post_ptr); - copy_skb = dev_alloc_skb(len + 2); + copy_skb = netdev_alloc_skb(tp->dev, len + 2); if (copy_skb == NULL) goto drop_it_no_recycle; - copy_skb->dev = tp->dev; skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); @@ -3309,6 +3344,15 @@ static int tg3_rx(struct tg3 *tp, int budget) next_pkt: (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + u32 idx = *post_ptr % TG3_RX_RING_SIZE; + + tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + + TG3_64BIT_REG_LOW, idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } next_pkt_nopost: sw_idx++; sw_idx %= TG3_RX_RCB_RING_SIZE(tp); @@ -3571,8 +3615,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, if ((sblk->status & SD_STATUS_UPDATED) || !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - 0x00000001); + tg3_disable_ints(tp); return IRQ_RETVAL(1); } return IRQ_RETVAL(0); @@ -3581,6 +3624,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, static int tg3_init_hw(struct tg3 *, int); static int tg3_halt(struct tg3 *, int, int); +/* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. + */ +static int tg3_restart_hw(struct tg3 *tp, int reset_phy) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + printk(KERN_ERR PFX "%s: Failed to re-initialize device, " + "aborting.\n", tp->dev->name); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + netif_poll_enable(tp->dev); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void tg3_poll_controller(struct net_device *dev) { @@ -3621,13 +3686,15 @@ static void tg3_reset_task(void *_data) } tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - tg3_init_hw(tp, 1); + if (tg3_init_hw(tp, 1)) + goto out; tg3_netif_start(tp); if (restart_timer) mod_timer(&tp->timer, jiffies + 1); +out: tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; tg3_full_unlock(tp); @@ -3764,7 +3831,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
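Both receive paths above switch from dev_alloc_skb() to netdev_alloc_skb(), which ties the skb to the device at allocation time, so the explicit skb->dev assignments disappear. The refill sequence the patch relies on is, in outline, the following (a sketch of the pattern rather than the driver's exact function; error unwinding and ring bookkeeping are trimmed):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Allocate a receive buffer, align its payload, and map it for DMA.
 * skb_size and rx_offset stand in for the driver's RX_PKT_BUF_SZ and
 * tp->rx_offset; the caller records skb/dma in its ring entry. */
static int refill_rx_slot(struct net_device *dev, struct pci_dev *pdev,
			  unsigned int skb_size, unsigned int rx_offset,
			  struct sk_buff **slot_skb, dma_addr_t *slot_dma)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, skb_size);	/* sets skb->dev for us */
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, rx_offset);		/* keep the IP header aligned */

	*slot_dma = pci_map_single(pdev, skb->data, skb_size - rx_offset,
				   PCI_DMA_FROMDEVICE);
	*slot_skb = skb;
	return 0;
}

The other change in this hunk, rx_std_posted, kicks the standard-ring producer mailbox every tp->rx_std_max_post buffers instead of only once after the poll loop, presumably to avoid posting more buffers at a time than parts with a small internal ring (such as the 5906 handled later in the patch) can absorb.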
*/ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -3789,24 +3856,30 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_unlock; } - tcp_opt_len = ((skb->h.th->doff - 5) * 4); - ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + mss |= (skb_headlen(skb) - ETH_HLEN) << 9; + else { + tcp_opt_len = ((skb->h.th->doff - 5) * 4); + ip_tcp_len = (skb->nh.iph->ihl * 4) + + sizeof(struct tcphdr); + + skb->nh.iph->check = 0; + skb->nh.iph->tot_len = htons(mss + ip_tcp_len + + tcp_opt_len); + mss |= (ip_tcp_len + tcp_opt_len) << 9; + } base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - skb->nh.iph->check = 0; - skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - skb->h.th->check = 0; - mss |= (ip_tcp_len + tcp_opt_len) << 9; } - else if (skb->ip_summed == CHECKSUM_HW) + else if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; #else mss = 0; - if (skb->ip_summed == CHECKSUM_HW) + if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; #endif #if TG3_VLAN_TAG_USED @@ -3854,12 +3927,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -3870,6 +3941,40 @@ out_unlock: return NETDEV_TX_OK; } +#if TG3_TSO_SUPPORT != 0 +static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); + +/* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. + */ +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) +{ + struct sk_buff *segs, *nskb; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + netif_stop_queue(tp->dev); + return NETDEV_TX_BUSY; + } + + segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); + if (unlikely(IS_ERR(segs))) + goto tg3_tso_bug_end; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit_dma_bug(nskb, tp->dev); + } while (segs); + +tg3_tso_bug_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} +#endif + /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and * support TG3_FLG2_HW_TSO_1 or firmware TSO only. */ @@ -3887,7 +3992,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
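tg3_tso_bug() above is the GSO fallback used later by tg3_start_xmit_dma_bug(): when the combined Ethernet, IP and TCP headers of a TSO frame exceed 80 bytes (possible once IP or TCP options such as timestamps and SACK blocks are present), the skb is software-segmented with skb_gso_segment() and the resulting frames are queued individually. The length test itself is plain arithmetic on the IP ihl and TCP doff fields; a standalone model of it, using the 80-byte limit hard-coded in the patch:

#include <stdio.h>

#define ETH_HLEN 14			/* untagged Ethernet header */

/* ihl and doff are in 32-bit words, exactly as carried on the wire. */
static int needs_gso_fallback(unsigned int ihl, unsigned int doff)
{
	unsigned int ip_len  = ihl * 4;		/* 20..60 bytes */
	unsigned int tcp_len = doff * 4;	/* 20..60 bytes */

	return (ETH_HLEN + ip_len + tcp_len) > 80;
}

int main(void)
{
	/* Plain headers: 14 + 20 + 20 = 54 bytes, hardware TSO is fine.  */
	printf("%d\n", needs_gso_fallback(5, 5));
	/* Maximal TCP options: 14 + 20 + 60 = 94 bytes, fall back to GSO. */
	printf("%d\n", needs_gso_fallback(5, 15));
	return 0;
}

The driver reaches the same sum as ip_tcp_len + tcp_opt_len, i.e. ihl*4 + sizeof(struct tcphdr) + (doff - 5)*4, which reduces to ihl*4 + doff*4.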
*/ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -3900,13 +4005,13 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) entry = tp->tx_prod; base_flags = 0; - if (skb->ip_summed == CHECKSUM_HW) + if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; #if TG3_TSO_SUPPORT != 0 mss = 0; if (skb->len > (tp->dev->mtu + ETH_HLEN) && (mss = skb_shinfo(skb)->gso_size) != 0) { - int tcp_opt_len, ip_tcp_len; + int tcp_opt_len, ip_tcp_len, hdr_len; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { @@ -3917,11 +4022,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) tcp_opt_len = ((skb->h.th->doff - 5) * 4); ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + if (unlikely((ETH_HLEN + hdr_len) > 80) && + (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG)) + return (tg3_tso_bug(tp, skb)); + base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); skb->nh.iph->check = 0; - skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); + skb->nh.iph->tot_len = htons(mss + hdr_len); if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { skb->h.th->check = 0; base_flags &= ~TXD_FLAG_TCPUDP_CSUM; @@ -4032,12 +4142,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -4070,6 +4178,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, static int tg3_change_mtu(struct net_device *dev, int new_mtu) { struct tg3 *tp = netdev_priv(dev); + int err; if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) return -EINVAL; @@ -4090,13 +4199,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) tg3_set_mtu(dev, tp, new_mtu); - tg3_init_hw(tp, 0); + err = tg3_restart_hw(tp, 0); - tg3_netif_start(tp); + if (!err) + tg3_netif_start(tp); tg3_full_unlock(tp); - return 0; + return err; } /* Free up pending packets in all rx/tx rings. @@ -4178,7 +4288,7 @@ static void tg3_free_rings(struct tg3 *tp) * end up in the driver. tp->{tx,}lock are held and thus * we may not sleep. */ -static void tg3_init_rings(struct tg3 *tp) +static int tg3_init_rings(struct tg3 *tp) { u32 i; @@ -4227,18 +4337,38 @@ static void tg3_init_rings(struct tg3 *tp) /* Now allocate fresh SKBs for each rx ring. 
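tg3_change_mtu() is the first of several call sites converted to the tg3_restart_hw() pattern (tg3_set_mac_addr(), the ethtool ring and pause handlers and the self-test follow the same shape later in the patch): halt, apply the new setting, restart, and only bring the queue back if the restart succeeded, propagating the error otherwise. Schematically, with the tg3_full_lock()/tg3_netif_stop() framing of the real code omitted and with reconfigure/apply as purely illustrative names:

/* Tear the device down, apply one configuration change, and restart.
 * A restart failure is returned to the caller instead of being ignored,
 * which is the point of replacing the bare tg3_init_hw() calls. */
static int reconfigure(struct tg3 *tp, void (*apply)(struct tg3 *))
{
	int err;

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);	/* stop DMA and firmware    */
	apply(tp);				/* e.g. store the new MTU   */

	err = tg3_restart_hw(tp, 0);		/* re-init rings + hardware */
	if (!err)
		tg3_netif_start(tp);		/* resume TX only on success */

	return err;
}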
*/ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, - -1, i) < 0) + if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { + printk(KERN_WARNING PFX + "%s: Using a smaller RX standard ring, " + "only %d out of %d buffers were allocated " + "successfully.\n", + tp->dev->name, i, tp->rx_pending); + if (i == 0) + return -ENOMEM; + tp->rx_pending = i; break; + } } if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { for (i = 0; i < tp->rx_jumbo_pending; i++) { if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, - -1, i) < 0) + -1, i) < 0) { + printk(KERN_WARNING PFX + "%s: Using a smaller RX jumbo ring, " + "only %d out of %d buffers were " + "allocated successfully.\n", + tp->dev->name, i, tp->rx_jumbo_pending); + if (i == 0) { + tg3_free_rings(tp); + return -ENOMEM; + } + tp->rx_jumbo_pending = i; break; + } } } + return 0; } /* @@ -4593,6 +4723,44 @@ static void tg3_write_sig_legacy(struct tg3 *tp, int kind) } } +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + for (i = 0; i < 400; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(10); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && + !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { + tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; + + printk(KERN_INFO PFX "%s: No firmware running.\n", + tp->dev->name); + } + + return 0; +} + static void tg3_stop_fw(struct tg3 *); /* tp->lock is held. */ @@ -4600,7 +4768,7 @@ static int tg3_chip_reset(struct tg3 *tp) { u32 val; void (*write_op)(struct tg3 *, u32, u32); - int i; + int err; tg3_nvram_lock(tp); @@ -4637,6 +4805,12 @@ static int tg3_chip_reset(struct tg3 *tp) } } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) val |= GRC_MISC_CFG_KEEP_GPHY_POWER; tw32(GRC_MISC_CFG, val); @@ -4760,26 +4934,9 @@ static int tg3_chip_reset(struct tg3 *tp) tw32_f(MAC_MODE, 0); udelay(40); - /* Wait for firmware initialization to complete. */ - for (i = 0; i < 100000; i++) { - tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) - break; - udelay(10); - } - - /* Chip might not be fitted with firmare. Some Sun onboard - * parts are configured like that. So don't signal the timeout - * of the above loop as an error, but do report the lack of - * running firmware once. 
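tg3_init_rings() now degrades gracefully instead of silently running with a half-empty ring: a partial allocation failure logs a warning, shrinks rx_pending (or rx_jumbo_pending) to the number of buffers actually obtained, and only returns -ENOMEM when nothing at all could be allocated. The policy in isolation (userspace C, with a fake allocator standing in for tg3_alloc_rx_skb()):

#include <stdio.h>

/* Pretend allocator: succeeds for the first 'budget' buffers only. */
static int fake_alloc(int index, int budget)
{
	return index < budget ? 0 : -1;
}

/* Returns 0 on full or partial success, -1 if nothing was allocated;
 * *pending is shrunk to the number of buffers actually obtained. */
static int fill_ring(int *pending, int budget)
{
	int i;

	for (i = 0; i < *pending; i++) {
		if (fake_alloc(i, budget) < 0) {
			fprintf(stderr,
				"using a smaller ring: %d of %d buffers\n",
				i, *pending);
			if (i == 0)
				return -1;	/* nothing usable, give up */
			*pending = i;		/* run with what we have   */
			break;
		}
	}
	return 0;
}

int main(void)
{
	int pending = 200;

	fill_ring(&pending, 64);		/* allocator runs dry at 64 */
	printf("ring size now %d\n", pending);
	return 0;
}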
- */ - if (i >= 100000 && - !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { - tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; - - printk(KERN_INFO PFX "%s: No firmware running.\n", - tp->dev->name); - } + err = tg3_poll_fw(tp); + if (err) + return err; if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { @@ -4863,7 +5020,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) #define TG3_FW_BSS_ADDR 0x08000a70 #define TG3_FW_BSS_LEN 0x10 -static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { +static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, @@ -4957,7 +5114,7 @@ static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 }; -static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { +static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, @@ -4985,6 +5142,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) BUG_ON(offset == TX_CPU_BASE && (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } if (offset == RX_CPU_BASE) { for (i = 0; i < 10000; i++) { tw32(offset + CPU_STATE, 0xffffffff); @@ -5022,13 +5185,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) struct fw_info { unsigned int text_base; unsigned int text_len; - u32 *text_data; + const u32 *text_data; unsigned int rodata_base; unsigned int rodata_len; - u32 *rodata_data; + const u32 *rodata_data; unsigned int data_base; unsigned int data_len; - u32 *data_data; + const u32 *data_data; }; /* tp->lock is held. 
*/ @@ -5160,7 +5323,7 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) #define TG3_TSO_FW_BSS_ADDR 0x08001b80 #define TG3_TSO_FW_BSS_LEN 0x894 -static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { +static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, @@ -5447,7 +5610,7 @@ static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, }; -static u32 tg3TsoFwRodata[] = { +static const u32 tg3TsoFwRodata[] = { 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, @@ -5455,7 +5618,7 @@ static u32 tg3TsoFwRodata[] = { 0x00000000, }; -static u32 tg3TsoFwData[] = { +static const u32 tg3TsoFwData[] = { 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -5477,7 +5640,7 @@ static u32 tg3TsoFwData[] = { #define TG3_TSO5_FW_BSS_ADDR 0x00010f50 #define TG3_TSO5_FW_BSS_LEN 0x88 -static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { +static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, @@ -5636,14 +5799,14 @@ static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { 0x00000000, 0x00000000, 0x00000000, }; -static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { +static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000, 0x00000000, }; -static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { +static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; @@ -5761,6 +5924,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) { struct tg3 *tp = netdev_priv(dev); struct sockaddr *addr = p; + int err = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; @@ -5778,9 +5942,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) tg3_full_lock(tp, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 0); - - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 0); + if (!err) + tg3_netif_start(tp); tg3_full_unlock(tp); } else { spin_lock_bh(&tp->lock); @@ -5788,7 +5952,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) spin_unlock_bh(&tp->lock); } - return 0; + return err; } /* tp->lock is held. */ @@ -5888,7 +6052,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) * can only do this after the hardware has been * successfully reset. */ - tg3_init_rings(tp); + err = tg3_init_rings(tp); + if (err) + return err; /* This value is determined during the probe time DMA * engine test, tg3_test_dma. 
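The firmware wait that used to sit inline in tg3_chip_reset() is factored out above as tg3_poll_fw(): the 5906 polls its internal VCPU for VCPU_STATUS_INIT_DONE (400 iterations of udelay(10), about 4 ms), older chips poll the NIC_SRAM_FIRMWARE_MBOX handshake for up to 100000 iterations (roughly one second), and a timeout is only reported once for boards that legitimately run without firmware. Stripped of the register specifics, the idiom is a bounded poll; a userspace sketch, with usleep() standing in for the driver's udelay():

#include <stdbool.h>
#include <unistd.h>

/* Poll 'condition' every 'step_us' microseconds, at most 'loops' times.
 * Returns 0 as soon as the condition holds, -1 on timeout; the caller
 * decides whether a timeout is fatal. */
static int poll_timeout(bool (*condition)(void *), void *arg,
			unsigned int loops, unsigned int step_us)
{
	unsigned int i;

	for (i = 0; i < loops; i++) {
		if (condition(arg))
			return 0;
		usleep(step_us);
	}
	return -1;
}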
@@ -5981,7 +6147,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } /* Setup replenish threshold. */ - tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8); + val = tp->rx_pending / 8; + if (val == 0) + val = 1; + else if (val > tp->rx_std_max_post) + val = tp->rx_std_max_post; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) + val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; + } + + tw32(RCVBDI_STD_THRESH, val); /* Initialize TG3_BDINFO's at: * RCVDBDI_STD_BD: standard eth size rx ring @@ -6141,8 +6320,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) #endif /* Receive/send statistics. */ - if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && - (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { + if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { val = tr32(RCVLPC_STATS_ENABLE); val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; tw32(RCVLPC_STATS_ENABLE, val); @@ -6396,7 +6579,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { + if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) { u32 tmp; /* Clear CRC stats. */ @@ -6596,12 +6780,14 @@ static void tg3_timer(unsigned long __opaque) need_setup = 1; } if (need_setup) { - tw32_f(MAC_MODE, - (tp->mac_mode & - ~MAC_MODE_PORT_MODE_MASK)); - udelay(40); - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } tg3_setup_phy(tp, 0); } } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) @@ -6610,13 +6796,29 @@ static void tg3_timer(unsigned long __opaque) tp->timer_counter = tp->timer_multiplier; } - /* Heartbeat is only sent once every 2 seconds. */ + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. + * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. 
+ */ if (!--tp->asf_counter) { if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { u32 val; tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, - FWCMD_NICDRV_ALIVE2); + FWCMD_NICDRV_ALIVE3); tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); /* 5 seconds timeout */ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); @@ -6644,12 +6846,12 @@ static int tg3_request_irq(struct tg3 *tp) fn = tg3_msi; if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) fn = tg3_msi_1shot; - flags = SA_SAMPLE_RANDOM; + flags = IRQF_SAMPLE_RANDOM; } else { fn = tg3_interrupt; if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) fn = tg3_interrupt_tagged; - flags = SA_SHIRQ | SA_SAMPLE_RANDOM; + flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; } return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); } @@ -6657,8 +6859,7 @@ static int tg3_request_irq(struct tg3 *tp) static int tg3_test_interrupt(struct tg3 *tp) { struct net_device *dev = tp->dev; - int err, i; - u32 int_mbox = 0; + int err, i, intr_ok = 0; if (!netif_running(dev)) return -ENODEV; @@ -6668,7 +6869,7 @@ static int tg3_test_interrupt(struct tg3 *tp) free_irq(tp->pdev->irq, dev); err = request_irq(tp->pdev->irq, tg3_test_isr, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); if (err) return err; @@ -6679,23 +6880,31 @@ static int tg3_test_interrupt(struct tg3 *tp) HOSTCC_MODE_NOW); for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); - if (int_mbox != 0) + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; break; + } + msleep(10); } tg3_disable_ints(tp); free_irq(tp->pdev->irq, dev); - + err = tg3_request_irq(tp); if (err) return err; - if (int_mbox != 0) + if (intr_ok) return 0; return -EIO; @@ -6872,9 +7081,10 @@ static int tg3_open(struct net_device *dev) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { - u32 val = tr32(0x7c04); + u32 val = tr32(PCIE_TRANSACTION_CFG); - tw32(0x7c04, val | (1 << 29)); + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); } } } @@ -7316,7 +7526,7 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev) get_stat64(&hw_stats->rx_ucast_packets) + get_stat64(&hw_stats->rx_mcast_packets) + get_stat64(&hw_stats->rx_bcast_packets); - + stats->tx_packets = old_stats->tx_packets + get_stat64(&hw_stats->tx_ucast_packets) + get_stat64(&hw_stats->tx_mcast_packets) + @@ -7624,7 +7834,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return 0; } -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { @@ -7688,7 +7898,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tg3 *tp = netdev_priv(dev); - + cmd->supported = (SUPPORTED_Autoneg); if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) @@ -7706,7 +7916,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->supported |= SUPPORTED_FIBRE; cmd->port = PORT_FIBRE; } - + cmd->advertising = tp->link_config.advertising; if (netif_running(dev)) { cmd->speed = tp->link_config.active_speed; @@ -7719,12 +7929,12 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 
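The heartbeat comment above is the rationale for switching from FWCMD_NICDRV_ALIVE2 to FWCMD_NICDRV_ALIVE3: the new command lets the ASF firmware inspect the RX ring before concluding the host has died, so a late timer tick (real-time kernels, netpoll) no longer triggers a spurious reset. Reduced to the mailbox writes visible in this hunk (a paraphrase, not the full timer; the reload of asf_counter from tp->asf_multiplier and the firmware-notification step live in code outside the quoted context):

/* Called from the driver timer.  asf_counter counts timer ticks; when it
 * reaches zero (roughly every 2 seconds) the driver posts an ALIVE3
 * command plus a 5 second timeout value into NIC SRAM for ASF firmware. */
static void asf_heartbeat_tick(struct tg3 *tp)
{
	if (--tp->asf_counter)
		return;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3);
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); /* seconds */
	}
	tp->asf_counter = tp->asf_multiplier;	/* reload, outside this hunk */
}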
cmd->maxrxpkt = 0; return 0; } - + static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct tg3 *tp = netdev_priv(dev); - - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { + + if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { /* These are the only valid advertisement bits allowed. */ if (cmd->autoneg == AUTONEG_ENABLE && (cmd->advertising & ~(ADVERTISED_1000baseT_Half | @@ -7756,69 +7966,69 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.speed = cmd->speed; tp->link_config.duplex = cmd->duplex; } - + if (netif_running(dev)) tg3_setup_phy(tp, 1); tg3_full_unlock(tp); - + return 0; } - + static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tg3 *tp = netdev_priv(dev); - + strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); strcpy(info->fw_version, tp->fw_ver); strcpy(info->bus_info, pci_name(tp->pdev)); } - + static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); - + wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) wol->wolopts = WAKE_MAGIC; memset(&wol->sopass, 0, sizeof(wol->sopass)); } - + static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct tg3 *tp = netdev_priv(dev); - + if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; if ((wol->wolopts & WAKE_MAGIC) && - tp->tg3_flags2 & TG3_FLG2_PHY_SERDES && + tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP)) return -EINVAL; - + spin_lock_bh(&tp->lock); if (wol->wolopts & WAKE_MAGIC) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; else tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; spin_unlock_bh(&tp->lock); - + return 0; } - + static u32 tg3_get_msglevel(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); return tp->msg_enable; } - + static void tg3_set_msglevel(struct net_device *dev, u32 value) { struct tg3 *tp = netdev_priv(dev); tp->msg_enable = value; } - + #if TG3_TSO_SUPPORT != 0 static int tg3_set_tso(struct net_device *dev, u32 value) { @@ -7829,16 +8039,23 @@ static int tg3_set_tso(struct net_device *dev, u32 value) return -EINVAL; return 0; } + if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { + if (value) + dev->features |= NETIF_F_TSO6; + else + dev->features &= ~NETIF_F_TSO6; + } return ethtool_op_set_tso(dev, value); } #endif - + static int tg3_nway_reset(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 bmcr; int r; - + if (!netif_running(dev)) return -EAGAIN; @@ -7856,14 +8073,14 @@ static int tg3_nway_reset(struct net_device *dev) r = 0; } spin_unlock_bh(&tp->lock); - + return r; } - + static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); - + ering->rx_max_pending = TG3_RX_RING_SIZE - 1; ering->rx_mini_max_pending = 0; if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) @@ -7882,24 +8099,24 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * ering->tx_pending = tp->tx_pending; } - + static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0; - + int irq_sync = 0, err = 0; + if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || (ering->tx_pending > TG3_TX_RING_SIZE - 1)) return -EINVAL; - + if (netif_running(dev)) { tg3_netif_stop(tp); irq_sync = 1; } 
tg3_full_lock(tp, irq_sync); - + tp->rx_pending = ering->rx_pending; if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && @@ -7910,29 +8127,30 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 1); - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); } tg3_full_unlock(tp); - - return 0; + + return err; } - + static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct tg3 *tp = netdev_priv(dev); - + epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0; epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0; } - + static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct tg3 *tp = netdev_priv(dev); - int irq_sync = 0; - + int irq_sync = 0, err = 0; + if (netif_running(dev)) { tg3_netif_stop(tp); irq_sync = 1; @@ -7955,51 +8173,52 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (netif_running(dev)) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_init_hw(tp, 1); - tg3_netif_start(tp); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); } tg3_full_unlock(tp); - - return 0; + + return err; } - + static u32 tg3_get_rx_csum(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; } - + static int tg3_set_rx_csum(struct net_device *dev, u32 data) { struct tg3 *tp = netdev_priv(dev); - + if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { if (data != 0) return -EINVAL; return 0; } - + spin_lock_bh(&tp->lock); if (data) tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; else tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; spin_unlock_bh(&tp->lock); - + return 0; } - + static int tg3_set_tx_csum(struct net_device *dev, u32 data) { struct tg3 *tp = netdev_priv(dev); - + if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { if (data != 0) return -EINVAL; return 0; } - + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ethtool_op_set_tx_hw_csum(dev, data); @@ -8054,7 +8273,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data) LED_CTRL_TRAFFIC_OVERRIDE | LED_CTRL_TRAFFIC_BLINK | LED_CTRL_TRAFFIC_LED); - + else tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | LED_CTRL_TRAFFIC_OVERRIDE); @@ -8075,6 +8294,8 @@ static void tg3_get_ethtool_stats (struct net_device *dev, #define NVRAM_TEST_SIZE 0x100 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 +#define NVRAM_SELFBOOT_HW_SIZE 0x20 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c static int tg3_test_nvram(struct tg3 *tp) { @@ -8086,12 +8307,14 @@ static int tg3_test_nvram(struct tg3 *tp) if (magic == TG3_EEPROM_MAGIC) size = NVRAM_TEST_SIZE; - else if ((magic & 0xff000000) == 0xa5000000) { + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { if ((magic & 0xe00000) == 0x200000) size = NVRAM_SELFBOOT_FORMAT1_SIZE; else return 0; - } else + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else return -EIO; buf = kmalloc(size, GFP_KERNEL); @@ -8110,7 +8333,8 @@ static int tg3_test_nvram(struct tg3 *tp) goto out; /* Selfboot format */ - if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) { + if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { u8 *buf8 = (u8 *) buf, csum8 = 0; for (i = 0; i < size; i++) @@ -8125,6 +8349,51 @@ static int 
tg3_test_nvram(struct tg3 *tp) goto out; } + if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + int j, k; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); if(csum != cpu_to_le32(buf[0x10/4])) @@ -8171,7 +8440,7 @@ static int tg3_test_link(struct tg3 *tp) /* Only test the commonly used registers */ static int tg3_test_registers(struct tg3 *tp) { - int i, is_5705; + int i, is_5705, is_5750; u32 offset, read_mask, write_mask, val, save_val, read_val; static struct { u16 offset; @@ -8179,6 +8448,7 @@ static int tg3_test_registers(struct tg3 *tp) #define TG3_FL_5705 0x1 #define TG3_FL_NOT_5705 0x2 #define TG3_FL_NOT_5788 0x4 +#define TG3_FL_NOT_5750 0x8 u32 read_mask; u32 write_mask; } reg_tbl[] = { @@ -8231,7 +8501,7 @@ static int tg3_test_registers(struct tg3 *tp) 0x00000000, 0xffff0002 }, { RCVDBDI_STD_BD+0xc, 0x0000, 0x00000000, 0xffffffff }, - + /* Receive BD Initiator Control Registers. */ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff }, @@ -8239,7 +8509,7 @@ static int tg3_test_registers(struct tg3 *tp) 0x00000000, 0x000003ff }, { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff }, - + /* Host Coalescing Control Registers. */ { HOSTCC_MODE, TG3_FL_NOT_5705, 0x00000000, 0x00000004 }, @@ -8289,9 +8559,9 @@ static int tg3_test_registers(struct tg3 *tp) 0xffffffff, 0x00000000 }, /* Buffer Manager Control Registers. 
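The new selfboot-HW branch of tg3_test_nvram() checks each data byte against a stored parity bit: the loop fails a byte whose bit-count is odd when the parity bit is set, or even when it is clear, i.e. the byte plus its parity bit must always carry an odd number of ones. The same check in isolation, with GCC's __builtin_popcount() standing in for the kernel's hweight8():

#include <stdio.h>

/* Odd parity over (data byte + parity bit): valid when the total number
 * of set bits is odd, which is what the tg3_test_nvram() loop enforces. */
static int selfboot_parity_ok(unsigned char data, unsigned char parity_bit)
{
	unsigned int ones = __builtin_popcount(data) + (parity_bit ? 1 : 0);

	return (ones & 1) == 1;
}

int main(void)
{
	printf("%d\n", selfboot_parity_ok(0x00, 1)); /* 0 data bits, parity set: ok */
	printf("%d\n", selfboot_parity_ok(0x03, 1)); /* 2 data bits, parity set: ok */
	printf("%d\n", selfboot_parity_ok(0x07, 1)); /* 3 data bits need parity 0   */
	return 0;
}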
 	 */
-	{ BUFMGR_MB_POOL_ADDR, 0x0000,
+	{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
 		0x00000000, 0x007fff80 },
-	{ BUFMGR_MB_POOL_SIZE, 0x0000,
+	{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
 		0x00000000, 0x007fffff },
 	{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
 		0x00000000, 0x0000003f },
@@ -8303,7 +8573,7 @@ static int tg3_test_registers(struct tg3 *tp)
 		0xffffffff, 0x00000000 },
 	{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
 		0xffffffff, 0x00000000 },
-	
+
 	/* Mailbox Registers */
 	{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
 		0x00000000, 0x000001ff },
@@ -8317,10 +8587,12 @@ static int tg3_test_registers(struct tg3 *tp)
 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
 	};
 
-	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+	is_5705 = is_5750 = 0;
+	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
 		is_5705 = 1;
-	else
-		is_5705 = 0;
+		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+			is_5750 = 1;
+	}
 
 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
@@ -8333,6 +8605,9 @@ static int tg3_test_registers(struct tg3 *tp)
 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
 			continue;
 
+		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+			continue;
+
 		offset = (u32) reg_tbl[i].offset;
 		read_mask = reg_tbl[i].read_mask;
 		write_mask = reg_tbl[i].write_mask;
@@ -8424,6 +8699,13 @@ static int tg3_test_memory(struct tg3 *tp)
 		{ 0x00008000, 0x02000},
 		{ 0x00010000, 0x0c000},
 		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5906[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00400},
+		{ 0x00006000, 0x00400},
+		{ 0x00008000, 0x01000},
+		{ 0x00010000, 0x01000},
+		{ 0xffffffff, 0x00000}
 	};
 	struct mem_entry *mem_tbl;
 	int err = 0;
@@ -8433,6 +8715,8 @@ static int tg3_test_memory(struct tg3 *tp)
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 			mem_tbl = mem_tbl_5755;
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+			mem_tbl = mem_tbl_5906;
 		else
 			mem_tbl = mem_tbl_5705;
 	} else
@@ -8443,7 +8727,7 @@ static int tg3_test_memory(struct tg3 *tp)
 				 mem_tbl[i].len)) != 0)
 			break;
 	}
-	
+
 	return err;
 }
@@ -8469,13 +8753,41 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 			return 0;
 
 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
-			   MAC_MODE_PORT_MODE_GMII;
+			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 		tw32(MAC_MODE, mac_mode);
 	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
-		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
-			     BMCR_SPEED1000);
+		u32 val;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+			u32 phytest;
+
+			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
+				u32 phy;
+
+				tg3_writephy(tp, MII_TG3_EPHY_TEST,
+					     phytest | MII_TG3_EPHY_SHADOW_EN);
+				if (!tg3_readphy(tp, 0x1b, &phy))
+					tg3_writephy(tp, 0x1b, phy & ~0x20);
+				if (!tg3_readphy(tp, 0x10, &phy))
+					tg3_writephy(tp, 0x10, phy & ~0x4000);
+				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
+			}
+		}
+		val = BMCR_LOOPBACK | BMCR_FULLDPLX;
+		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+			val |= BMCR_SPEED100;
+		else
+			val |= BMCR_SPEED1000;
+
+		tg3_writephy(tp, MII_BMCR, val);
 		udelay(40);
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
+
 		/* reset to prevent losing 1st rx packet intermittently */
 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
 			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
@@ -8483,7 +8795,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 			tw32_f(MAC_RX_MODE, tp->rx_mode);
 		}
 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
+			   MAC_MODE_LINK_POLARITY;
+		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
@@ -8497,7 +8813,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	err = -EIO;
 
 	tx_len = 1514;
-	skb = dev_alloc_skb(tx_len);
+	skb = netdev_alloc_skb(tp->dev, tx_len);
 	if (!skb)
 		return -ENOMEM;
@@ -8532,7 +8848,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	udelay(10);
 
-	for (i = 0; i < 10; i++) {
+	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
+	for (i = 0; i < 25; i++) {
 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 		       HOSTCC_MODE_NOW);
@@ -8578,7 +8895,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 			goto out;
 	}
 	err = 0;
-	
+
 	/* tg3_free_rings will unmap and free the rx_skb */
 out:
 	return err;
@@ -8596,7 +8913,9 @@ static int tg3_test_loopback(struct tg3 *tp)
 	if (!netif_running(tp->dev))
 		return TG3_LOOPBACK_FAILED;
 
-	tg3_reset_hw(tp, 1);
+	err = tg3_reset_hw(tp, 1);
+	if (err)
+		return TG3_LOOPBACK_FAILED;
 
 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
 		err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8670,8 +8989,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		if (netif_running(dev)) {
 			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-			tg3_init_hw(tp, 1);
-			tg3_netif_start(tp);
+			if (!tg3_restart_hw(tp, 1))
+				tg3_netif_start(tp);
 		}
 
 		tg3_full_unlock(tp);
@@ -8738,6 +9057,9 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
+	if (netif_running(dev))
+		tg3_netif_stop(tp);
+
 	tg3_full_lock(tp, 0);
 
 	tp->vlgrp = grp;
@@ -8746,16 +9068,25 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	__tg3_set_rx_mode(dev);
 
 	tg3_full_unlock(tp);
+
+	if (netif_running(dev))
+		tg3_netif_start(tp);
 }
 
 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
+	if (netif_running(dev))
+		tg3_netif_stop(tp);
+
 	tg3_full_lock(tp, 0);
 	if (tp->vlgrp)
 		tp->vlgrp->vlan_devices[vid] = NULL;
 	tg3_full_unlock(tp);
+
+	if (netif_running(dev))
+		tg3_netif_start(tp);
 }
 #endif
@@ -8821,7 +9152,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	return 0;
 }
 
-static struct ethtool_ops tg3_ethtool_ops = {
+static const struct ethtool_ops tg3_ethtool_ops = {
 	.get_settings		= tg3_get_settings,
 	.set_settings		= tg3_set_settings,
 	.get_drvinfo		= tg3_get_drvinfo,
@@ -8870,7 +9201,9 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
 	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
 		return;
 
-	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
+	if ((magic != TG3_EEPROM_MAGIC) &&
+	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
 		return;
 
 	/*
@@ -8892,7 +9225,7 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
 
 	tp->nvram_size = cursize;
 }
-	
+
 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
 {
 	u32 val;
@@ -9108,6 +9441,13 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
 	}
 }
 
+static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+	tp->nvram_jedecnum = JEDEC_ATMEL;
+	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
 static void __devinit tg3_nvram_init(struct tg3 *tp)
 {
@@ -9144,6 +9484,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
 			tg3_get_5755_nvram_info(tp);
 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 			tg3_get_5787_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+			tg3_get_5906_nvram_info(tp);
 		else
 			tg3_get_nvram_info(tp);
 
@@ -9308,7 +9650,7 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
 			(addr & EEPROM_ADDR_ADDR_MASK) |
 			EEPROM_ADDR_START |
 			EEPROM_ADDR_WRITE);
-		
+
 		for (j = 0; j < 10000; j++) {
 			val = tr32(GRC_EEPROM_ADDR);
@@ -9344,7 +9686,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 		u32 phy_addr, page_off, size;
 
 		phy_addr = offset & ~pagemask;
-	
+
 		for (j = 0; j < pagesize; j += 4) {
 			if ((ret = tg3_nvram_read(tp, phy_addr + j,
 						  (u32 *) (tmp + j))))
@@ -9617,6 +9959,12 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 	/* Assume an onboard device by default. */
 	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
+			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+		return;
+	}
+
 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 		u32 nic_cfg, led_cfg;
@@ -9800,7 +10148,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 		    (bmsr & BMSR_LSTATUS))
 			goto skip_phy_reset;
-		    
+
 		err = tg3_phy_reset(tp);
 		if (err)
 			return err;
@@ -9948,7 +10296,10 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
 	}
 
 out_not_found:
-	strcpy(tp->board_part_number, "none");
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		strcpy(tp->board_part_number, "BCM95906");
+	else
+		strcpy(tp->board_part_number, "none");
 }
 
 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
@@ -9996,6 +10347,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	static struct pci_device_id write_reorder_chipsets[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
 			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
+			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
 		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
 			     PCI_DEVICE_ID_VIA_8385_0) },
 		{ },
@@ -10148,6 +10501,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
 		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -10157,18 +10511,26 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
-		} else
-			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
+		} else {
+			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
+					  TG3_FLG2_HW_TSO_1_BUG;
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+				ASIC_REV_5750 &&
+			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
+		}
 	}
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
-	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
 		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
 
 	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
@@ -10298,6 +10660,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		pci_cmd &= ~PCI_COMMAND_MEMORY;
 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 	}
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		tp->read32_mbox = tg3_read32_mbox_5906;
+		tp->write32_mbox = tg3_write32_mbox_5906;
+		tp->write32_tx_mbox = tg3_write32_mbox_5906;
+		tp->write32_rx_mbox = tg3_write32_mbox_5906;
+	}
 
 	if (tp->write32 == tg3_write_indirect_reg32 ||
 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
@@ -10312,7 +10680,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	 * When the flag is set, it means that GPIO1 is used for eeprom
 	 * write protect and also implies that it is a LOM where GPIOs
 	 * are not used to switch power.
-	 */ 
+	 */
 	tg3_get_eeprom_hw_cfg(tp);
 
 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -10369,6 +10737,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
 	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
 		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
@@ -10382,7 +10751,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 			tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
-		else
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
 			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 	}
@@ -10472,7 +10841,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
 
 	err = tg3_phy_probe(tp);
@@ -10523,7 +10893,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	 * straddle the 4GB address boundary in some cases.
 	 */
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tp->dev->hard_start_xmit = tg3_start_xmit;
 	else
 		tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
@@ -10533,6 +10904,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
 		tp->rx_offset = 0;
 
+	tp->rx_std_max_post = TG3_RX_RING_SIZE;
+
+	/* Increment the rx prod index on the rx std ring by at most
+	 * 8 for these chips to workaround hw errata.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+		tp->rx_std_max_post = 8;
+
 	/* By default, disable wake-on-lan.  User can change this
 	 * using ETHTOOL_SWOL.
 	 */
@@ -10549,11 +10930,13 @@ static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
 	struct pcidev_cookie *pcp = pdev->sysdata;
 
 	if (pcp != NULL) {
-		int node = pcp->prom_node;
+		unsigned char *addr;
+		int len;
 
-		if (prom_getproplen(node, "local-mac-address") == 6) {
-			prom_getproperty(node, "local-mac-address",
-					 dev->dev_addr, 6);
+		addr = of_get_property(pcp->prom_node, "local-mac-address",
+				       &len);
+		if (addr && len == 6) {
+			memcpy(dev->dev_addr, addr, 6);
 			memcpy(dev->perm_addr, dev->dev_addr, 6);
 			return 0;
 		}
@@ -10592,6 +10975,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
 		else
 			tg3_nvram_unlock(tp);
 	}
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		mac_offset = 0x10;
 
 	/* First try to get it from MAC address mailbox. */
 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
@@ -11075,6 +11460,12 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 			DEFAULT_MB_MACRX_LOW_WATER_5705;
 		tp->bufmgr_config.mbuf_high_water =
 			DEFAULT_MB_HIGH_WATER_5705;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+			tp->bufmgr_config.mbuf_mac_rx_low_water =
+				DEFAULT_MB_MACRX_LOW_WATER_5906;
+			tp->bufmgr_config.mbuf_high_water =
+				DEFAULT_MB_HIGH_WATER_5906;
+		}
 
 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
@@ -11118,6 +11509,8 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
 	case PHY_ID_BCM5780:	return "5780";
 	case PHY_ID_BCM5755:	return "5755";
 	case PHY_ID_BCM5787:	return "5787";
+	case PHY_ID_BCM5756:	return "5722/5756";
+	case PHY_ID_BCM5906:	return "5906";
 	case PHY_ID_BCM8002:	return "8002/serdes";
 	case 0:			return "serdes";
 	default:		return "unknown";
@@ -11319,7 +11712,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
@@ -11419,8 +11811,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	 * Firmware TSO on older chips gives lower performance, so it
 	 * is off by default, but can be enabled using ethtool.
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 		dev->features |= NETIF_F_TSO;
+		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
+		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
+			dev->features |= NETIF_F_TSO6;
+	}
 #endif
@@ -11594,7 +11990,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		tg3_full_lock(tp, 0);
 
 		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-		tg3_init_hw(tp, 1);
+		if (tg3_restart_hw(tp, 1))
+			goto out;
 
 		tp->timer.expires = jiffies + tp->timer_offset;
 		add_timer(&tp->timer);
@@ -11602,6 +11999,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		netif_device_attach(dev);
 		tg3_netif_start(tp);
 
+out:
 		tg3_full_unlock(tp);
 	}
@@ -11628,16 +12026,19 @@ static int tg3_resume(struct pci_dev *pdev)
 	tg3_full_lock(tp, 0);
 
 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-	tg3_init_hw(tp, 1);
+	err = tg3_restart_hw(tp, 1);
+	if (err)
+		goto out;
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
 
 	tg3_netif_start(tp);
 
+out:
 	tg3_full_unlock(tp);
 
-	return 0;
+	return err;
 }
 
 static struct pci_driver tg3_driver = {
@@ -11651,7 +12052,7 @@ static struct pci_driver tg3_driver = {
 
 static int __init tg3_init(void)
 {
-	return pci_module_init(&tg3_driver);
+	return pci_register_driver(&tg3_driver);
 }
 
 static void __exit tg3_cleanup(void)