Diffstat (limited to 'drivers/net')
89 files changed, 2054 insertions(+), 1064 deletions(-)
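Most of the per-driver churn in the diff below replaces open-coded page-fragment DMA mapping (pci_map_page() or dma_map_page() called on frag->page and frag->page_offset) with the skb fragment helpers skb_frag_dma_map() and skb_frag_address(); see the 3c59x, greth, acenic, atl1c, atl1e and atl1 hunks. A minimal sketch of that recurring pattern follows; example_map_frags() and its arguments are hypothetical stand-ins for each driver's tx-map routine, not code taken from the patch.

/*
 * Illustrative sketch only: the fragment-mapping pattern this series
 * converts drivers to.  skb_frag_dma_map() and skb_frag_address() are
 * the real helpers; the surrounding function is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int example_map_frags(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *mapping)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Previously open-coded as:
		 *   pci_map_page(pdev, frag->page, frag->page_offset,
		 *                frag->size, PCI_DMA_TODEVICE);
		 * now routed through the fragment accessor:
		 */
		mapping[i] = skb_frag_dma_map(dev, frag, 0, frag->size,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mapping[i]))
			return -ENOMEM;
	}
	return 0;
}

Note that the offset argument of skb_frag_dma_map() is relative to the fragment itself, which is why call sites such as atl1e now pass (i * MAX_TX_BUF_LEN) rather than frag->page_offset + (i * MAX_TX_BUF_LEN).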
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8cb75a6efec..1dcb07ce526 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -557,7 +557,7 @@ down: static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET }; + struct ethtool_cmd ecmd; u32 slave_speed; int res; @@ -565,18 +565,15 @@ static int bond_update_speed_duplex(struct slave *slave) slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL; - if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) - return -1; - - res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); + res = __ethtool_get_settings(slave_dev, &ecmd); if (res < 0) return -1; - slave_speed = ethtool_cmd_speed(&etool); + slave_speed = ethtool_cmd_speed(&ecmd); if (slave_speed == 0 || slave_speed == ((__u32) -1)) return -1; - switch (etool.duplex) { + switch (ecmd.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; @@ -585,7 +582,7 @@ static int bond_update_speed_duplex(struct slave *slave) } slave->speed = slave_speed; - slave->duplex = etool.duplex; + slave->duplex = ecmd.duplex; return 0; } diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 6e1f5959a65..9ca45dcba75 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2179,9 +2179,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; vp->tx_ring[entry].frag[i+1].addr = - cpu_to_le32(pci_map_single(VORTEX_PCI(vp), - (void*)page_address(frag->page) + frag->page_offset, - frag->size, PCI_DMA_TODEVICE)); + cpu_to_le32(pci_map_single( + VORTEX_PCI(vp), + (void *)skb_frag_address(frag), + frag->size, PCI_DMA_TODEVICE)); if (i == skb_shinfo(skb)->nr_frags-1) vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index a439cbdda3b..a8bb30cf512 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -81,6 +81,7 @@ config PCMCIA_3C589 config VORTEX tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" depends on (PCI || EISA) + select NET_CORE select MII ---help--- This option enables driver support for a large number of 10Mbps and diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1f647471e65..6dff5a0e733 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -62,6 +62,7 @@ config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver supports the PCI-Express gigabit ethernet adapters @@ -102,6 +103,7 @@ config FEALNX tristate "Myson MTD-8xx PCI Ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- Say Y here to support the Myson MTD-800 family of PCI-based Ethernet @@ -112,6 +114,7 @@ source "drivers/net/ethernet/8390/Kconfig" config NET_NETX tristate "NetX Ethernet support" + select NET_CORE select MII depends on ARCH_NETX ---help--- @@ -128,6 +131,7 @@ source "drivers/net/ethernet/oki-semi/Kconfig" config ETHOC tristate "OpenCores 10/100 Mbps Ethernet MAC support" depends on HAS_IOMEM && HAS_DMA + select NET_CORE select MII select PHYLIB select CRC32 diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig index 5c804bbe3da..0bff571b1bb 100644 --- 
a/drivers/net/ethernet/adaptec/Kconfig +++ b/drivers/net/ethernet/adaptec/Kconfig @@ -22,6 +22,7 @@ config ADAPTEC_STARFIRE tristate "Adaptec Starfire/DuraLAN support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig index 6de9851045c..49a30d37ae4 100644 --- a/drivers/net/ethernet/adi/Kconfig +++ b/drivers/net/ethernet/adi/Kconfig @@ -23,6 +23,7 @@ config BFIN_MAC tristate "Blackfin on-chip MAC support" depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) select CRC32 + select NET_CORE select MII select PHYLIB select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index a5f6b07f8f3..bc3bd34c43f 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -113,9 +113,8 @@ static void greth_print_tx_packet(struct sk_buff *skb) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, - phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) + - skb_shinfo(skb)->frags[i].page_offset, - length, true); + skb_frag_address(&skb_shinfo(skb)->frags[i]), + skb_shinfo(skb)->frags[i].size, true); } } @@ -528,11 +527,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) greth_write_bd(&bdp->stat, status); - dma_addr = dma_map_page(greth->dev, - frag->page, - frag->page_offset, - frag->size, - DMA_TO_DEVICE); + dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size, + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(greth->dev, dma_addr))) goto frag_map_error; diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 1d6f2db794f..8794cf831bd 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2485,9 +2485,9 @@ restart: info = ap->skb->tx_skbuff + idx; desc = ap->tx_ring + idx; - mapping = pci_map_page(ap->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); + mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, + frag->size, + PCI_DMA_TODEVICE); flagsize = (frag->size << 16); if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 8af1c934dbd..238b537b68f 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -34,6 +34,7 @@ config AMD8111_ETH tristate "AMD 8111 (new PCI LANCE) support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- If you have an AMD 8111-based PCI LANCE ethernet card, @@ -59,6 +60,7 @@ config PCNET32 tristate "AMD PCnet32 PCI support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- If you have a PCnet32 or PCnetPCI based network (Ethernet) card, diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 26ab8cae28b..1ed886d421f 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -22,6 +22,7 @@ config ATL2 tristate "Atheros L2 Fast Ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver supports the Atheros L2 fast ethernet adapter. 
@@ -33,6 +34,7 @@ config ATL1 tristate "Atheros/Attansic L1 Gigabit Ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver supports the Atheros/Attansic L1 gigabit ethernet @@ -45,6 +47,7 @@ config ATL1E tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)" depends on PCI && EXPERIMENTAL select CRC32 + select NET_CORE select MII ---help--- This driver supports the Atheros L1E gigabit ethernet adapter. @@ -56,6 +59,7 @@ config ATL1C tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)" depends on PCI && EXPERIMENTAL select CRC32 + select NET_CORE select MII ---help--- This driver supports the Atheros L1C gigabit ethernet adapter. diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index acb4c1098ca..2b9f925fdfc 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2180,11 +2180,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); buffer_info->length = frag->size; - buffer_info->dma = - pci_map_page(adapter->pdev, frag->page, - frag->page_offset, - buffer_info->length, - PCI_DMA_TODEVICE); + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, 0, + buffer_info->length, + PCI_DMA_TODEVICE); ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, ATL1C_PCIMAP_TODEVICE); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 1b5dc799348..7e27eb354f1 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1765,12 +1765,11 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, MAX_TX_BUF_LEN : buf_len; buf_len -= tx_buffer->length; - tx_buffer->dma = - pci_map_page(adapter->pdev, frag->page, - frag->page_offset + - (i * MAX_TX_BUF_LEN), - tx_buffer->length, - PCI_DMA_TODEVICE); + tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, + (i * MAX_TX_BUF_LEN), + tx_buffer->length, + PCI_DMA_TODEVICE); ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index c34e82391f7..edf826a5028 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2283,9 +2283,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? 
ATL1_MAX_TX_BUF_LEN : buf_len; buf_len -= buffer_info->length; - buffer_info->dma = pci_map_page(adapter->pdev, - frag->page, - frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, i * ATL1_MAX_TX_BUF_LEN, buffer_info->length, PCI_DMA_TODEVICE); if (++next_to_use == tpd_ring->count) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index d82ad221ebd..f15e72e81ac 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -22,6 +22,7 @@ config B44 tristate "Broadcom 440x/47xx ethernet support" depends on SSB_POSSIBLE && HAS_DMA select SSB + select NET_CORE select MII ---help--- If you have a network (Ethernet) controller of this type, say Y @@ -53,6 +54,7 @@ config B44_PCI config BCM63XX_ENET tristate "Broadcom 63xx internal mac support" depends on BCM63XX + select NET_CORE select MII select PHYLIB help diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a7e28a2c534..1485013b4b8 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,6 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) #define DRV_MODULE_RELDATE "August 18, 2011" +#define RESET_KIND_SHUTDOWN 0 +#define RESET_KIND_INIT 1 +#define RESET_KIND_SUSPEND 2 + #define TG3_DEF_RX_MODE 0 #define TG3_DEF_TX_MODE 0 #define TG3_DEF_MSG_ENABLE \ @@ -187,6 +191,12 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) #endif +#if (NET_IP_ALIGN != 0) +#define TG3_RX_OFFSET(tp) ((tp)->rx_offset) +#else +#define TG3_RX_OFFSET(tp) 0 +#endif + /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) #define TG3_TX_BD_DMA_MAX 4096 @@ -718,6 +728,103 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) tg3_ape_write32(tp, gnt + 4 * locknum, bit); } +static void tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. 
*/ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + static void tg3_disable_ints(struct tg3 *tp) { int i; @@ -1390,6 +1497,149 @@ static void tg3_ump_link_report(struct tg3 *tp) tg3_generate_fw_event(tp); } +/* tp->lock is held. */ +static void tg3_stop_fw(struct tg3 *tp) +{ + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } +} + +/* tp->lock is held. */ +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) +{ + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. 
*/ +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. */ +static void tg3_write_sig_legacy(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; +} + static void tg3_link_report(struct tg3 *tp) { if (!netif_carrier_ok(tp->dev)) { @@ -2481,12 +2731,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) } static int tg3_setup_phy(struct tg3 *, int); - -#define RESET_KIND_SHUTDOWN 0 -#define RESET_KIND_INIT 1 -#define RESET_KIND_SUSPEND 2 - -static void tg3_write_sig_post_reset(struct tg3 *, int); static int tg3_halt_cpu(struct tg3 *, u32); static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) @@ -2755,6 +2999,228 @@ static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) return res; } +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 + +/* tp->lock is held. 
*/ +static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +{ + int i; + + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } + + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} + +struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; +}; + +/* tp->lock is held. */ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) +{ + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); + + err = 0; + +out: + return err; +} + +/* tp->lock is held. */ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + int err, i; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. 
*/ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; +} + +/* tp->lock is held. */ +static int tg3_load_tso_firmware(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; +} + + /* tp->lock is held. 
*/ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) { @@ -3334,8 +3800,9 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) return 0; - if ((adv_reg & all_mask) != all_mask) + if ((adv_reg & ADVERTISE_ALL) != all_mask) return 0; + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { u32 tg3_ctrl; @@ -3348,9 +3815,11 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) return 0; - if ((tg3_ctrl & all_mask) != all_mask) + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (tg3_ctrl != all_mask) return 0; } + return 1; } @@ -3651,8 +4120,8 @@ relink: newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; if (newlnkctl != oldlnkctl) pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - newlnkctl); + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); } if (current_link_up != netif_carrier_ok(tp->dev)) { @@ -4982,11 +5451,11 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. */ - skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); if (skb == NULL) return -ENOMEM; - skb_reserve(skb, tp->rx_offset); + skb_reserve(skb, TG3_RX_OFFSET(tp)); mapping = pci_map_single(tp->pdev, skb->data, skb_size, PCI_DMA_FROMDEVICE); @@ -5704,7 +6173,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) * NIC to stop sending us irqs, engaging "in-intr-handler" * event coalescing. */ - tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + tw32_mailbox(tnapi->int_mbox, 0x00000001); if (likely(!tg3_irq_sync(tp))) napi_schedule(&tnapi->napi); @@ -6265,6 +6734,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } } + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + #ifdef BCM_KERNEL_SUPPORTS_8021Q if (vlan_tx_tag_present(skb)) { base_flags |= TXD_FLAG_VLAN; @@ -6272,10 +6745,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) } #endif - if (tg3_flag(tp, USE_JUMBO_BDFLAG) && - !mss && skb->len > VLAN_ETH_FRAME_LEN) - base_flags |= TXD_FLAG_JMB_PKT; - len = skb_headlen(skb); mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); @@ -7121,230 +7590,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent) return err; } -static void tg3_ape_send_event(struct tg3 *tp, u32 event) -{ - int i; - u32 apedata; - - /* NCSI does not support APE events */ - if (tg3_flag(tp, APE_HAS_NCSI)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); - if (apedata != APE_SEG_SIG_MAGIC) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); - if (!(apedata & APE_FW_STATUS_READY)) - return; - - /* Wait for up to 1 millisecond for APE to service previous event. 
*/ - for (i = 0; i < 10; i++) { - if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) - return; - - apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, - event | APE_EVENT_STATUS_EVENT_PENDING); - - tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - break; - - udelay(100); - } - - if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) - tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); -} - -static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) -{ - u32 event; - u32 apedata; - - if (!tg3_flag(tp, ENABLE_APE)) - return; - - switch (kind) { - case RESET_KIND_INIT: - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, - APE_HOST_SEG_SIG_MAGIC); - tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, - APE_HOST_SEG_LEN_MAGIC); - apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); - tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); - tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, - APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); - tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, - APE_HOST_BEHAV_NO_PHYLOCK); - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, - TG3_APE_HOST_DRVR_STATE_START); - - event = APE_EVENT_STATUS_STATE_START; - break; - case RESET_KIND_SHUTDOWN: - /* With the interface we are currently using, - * APE does not track driver state. Wiping - * out the HOST SEGMENT SIGNATURE forces - * the APE to assume OS absent status. - */ - tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); - - if (device_may_wakeup(&tp->pdev->dev) && - tg3_flag(tp, WOL_ENABLE)) { - tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, - TG3_APE_HOST_WOL_SPEED_AUTO); - apedata = TG3_APE_HOST_DRVR_STATE_WOL; - } else - apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; - - tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); - - event = APE_EVENT_STATUS_STATE_UNLOAD; - break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; - default: - return; - } - - event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; - - tg3_ape_send_event(tp, event); -} - -/* tp->lock is held. */ -static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) -{ - tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, - NIC_SRAM_FIRMWARE_MBOX_MAGIC1); - - if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. */ -static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) -{ - if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START_DONE); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD_DONE); - break; - - default: - break; - } - } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); -} - -/* tp->lock is held. 
*/ -static void tg3_write_sig_legacy(struct tg3 *tp, int kind) -{ - if (tg3_flag(tp, ENABLE_ASF)) { - switch (kind) { - case RESET_KIND_INIT: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_START); - break; - - case RESET_KIND_SHUTDOWN: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_UNLOAD); - break; - - case RESET_KIND_SUSPEND: - tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, - DRV_STATE_SUSPEND); - break; - - default: - break; - } - } -} - -static int tg3_poll_fw(struct tg3 *tp) -{ - int i; - u32 val; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* Wait up to 20ms for init done. */ - for (i = 0; i < 200; i++) { - if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) - return 0; - udelay(100); - } - return -ENODEV; - } - - /* Wait for firmware initialization to complete. */ - for (i = 0; i < 100000; i++) { - tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) - break; - udelay(10); - } - - /* Chip might not be fitted with firmware. Some Sun onboard - * parts are configured like that. So don't signal the timeout - * of the above loop as an error, but do report the lack of - * running firmware once. - */ - if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { - tg3_flag_set(tp, NO_FWARE_REPORTED); - - netdev_info(tp->dev, "No firmware running\n"); - } - - if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { - /* The 57765 A0 needs a little more - * time to do some important work. - */ - mdelay(10); - } - - return 0; -} - /* Save PCI command register before chip reset */ static void tg3_save_pci_state(struct tg3 *tp) { @@ -7416,8 +7661,6 @@ static void tg3_restore_pci_state(struct tg3 *tp) } } -static void tg3_stop_fw(struct tg3 *); - /* tp->lock is held. */ static int tg3_chip_reset(struct tg3 *tp) { @@ -7665,22 +7908,6 @@ static int tg3_chip_reset(struct tg3 *tp) } /* tp->lock is held. */ -static void tg3_stop_fw(struct tg3 *tp) -{ - if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { - /* Wait for RX cpu to ACK the previous event. */ - tg3_wait_for_event_ack(tp); - - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); - - tg3_generate_fw_event(tp); - - /* Wait for RX cpu to ACK this event. */ - tg3_wait_for_event_ack(tp); - } -} - -/* tp->lock is held. */ static int tg3_halt(struct tg3 *tp, int kind, int silent) { int err; @@ -7703,227 +7930,6 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent) return 0; } -#define RX_CPU_SCRATCH_BASE 0x30000 -#define RX_CPU_SCRATCH_SIZE 0x04000 -#define TX_CPU_SCRATCH_BASE 0x34000 -#define TX_CPU_SCRATCH_SIZE 0x04000 - -/* tp->lock is held. */ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) -{ - int i; - - BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val = tr32(GRC_VCPU_EXT_CTRL); - - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); - return 0; - } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); - } else { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - } - - if (i >= 10000) { - netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? 
"RX" : "TX"); - return -ENODEV; - } - - /* Clear firmware's nvram arbitration. */ - if (tg3_flag(tp, NVRAM)) - tw32(NVRAM_SWARB, SWARB_REQ_CLR0); - return 0; -} - -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const __be32 *fw_data; -}; - -/* tp->lock is held. */ -static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, - int cpu_scratch_size, struct fw_info *info) -{ - int err, lock_err, i; - void (*write_op)(struct tg3 *, u32, u32); - - if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { - netdev_err(tp->dev, - "%s: Trying to load TX cpu firmware which is 5705\n", - __func__); - return -EINVAL; - } - - if (tg3_flag(tp, 5705_PLUS)) - write_op = tg3_write_mem; - else - write_op = tg3_write_indirect_reg32; - - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. - */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; - - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - be32_to_cpu(info->fw_data[i])); - - err = 0; - -out: - return err; -} - -/* tp->lock is held. */ -static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - int err, i; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. */ - - info.fw_base = be32_to_cpu(fw_data[1]); - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, - RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, - TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; - - /* Now startup only the RX cpu. */ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) - break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " - "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); - - return 0; -} - -/* tp->lock is held. */ -static int tg3_load_tso_firmware(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; - - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) - return 0; - - fw_data = (void *)tp->fw->data; - - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. 
*/ - - info.fw_base = be32_to_cpu(fw_data[1]); - cpu_scratch_size = tp->fw_len; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - cpu_base = RX_CPU_BASE; - cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; - } else { - cpu_base = TX_CPU_BASE; - cpu_scratch_base = TX_CPU_SCRATCH_BASE; - cpu_scratch_size = TX_CPU_SCRATCH_SIZE; - } - - err = tg3_load_firmware_cpu(tp, cpu_base, - cpu_scratch_base, cpu_scratch_size, - &info); - if (err) - return err; - - /* Now startup the cpu. */ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) - break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, - "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); - return 0; -} - - static int tg3_set_mac_addr(struct net_device *dev, void *p) { struct tg3 *tp = netdev_priv(dev); @@ -8103,7 +8109,7 @@ static void tg3_rings_reset(struct tg3 *tp) tw32_mailbox(tp->napi[i].prodmbox, 0); tw32_rx_mbox(tp->napi[i].consmbox, 0); tw32_mailbox_f(tp->napi[i].int_mbox, 1); - tp->napi[0].chk_msi_cnt = 0; + tp->napi[i].chk_msi_cnt = 0; tp->napi[i].last_rx_cons = 0; tp->napi[i].last_tx_cons = 0; } @@ -8799,6 +8805,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { val = tr32(MSGINT_MODE); val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -9183,8 +9191,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp) tnapi->chk_msi_cnt++; return; } - tw32_mailbox(tnapi->int_mbox, - tnapi->last_tag << 24); + tg3_msi(0, tnapi); } } tnapi->chk_msi_cnt = 0; @@ -9415,7 +9422,7 @@ static int tg3_test_interrupt(struct tg3 *tp) if (intr_ok) { /* Reenable MSI one shot mode. */ - if (tg3_flag(tp, 57765_PLUS)) { + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); } @@ -9593,6 +9600,8 @@ static void tg3_ints_init(struct tg3 *tp) u32 msi_mode = tr32(MSGINT_MODE); if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); } defcfg: @@ -14071,7 +14080,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) /* BCM5785 devices are effectively PCIe devices, and should * follow PCIe codepaths, but do not have a PCIe capabilities * section. 
- */ + */ tg3_flag_set(tp, PCI_EXPRESS); } else if (!tg3_flag(tp, 5705_PLUS) || tg3_flag(tp, 5780_CLASS)) { @@ -15531,7 +15540,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; tnapi->int_mbox = intmbx; - if (i < 4) + if (i <= 4) intmbx += 0x8; else intmbx += 0x4; diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index b45b8eb3b9b..8e627186507 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -16,8 +16,6 @@ * www.brocade.com */ -#include "bfa_defs_cna.h" -#include "cna.h" #include "bfa_cee.h" #include "bfi_cna.h" #include "bfa_ioc.h" diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index 205b92b3709..a81c0ccfc2f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -251,10 +251,10 @@ struct bfa_mfg_block { * ---------------------- pci definitions ------------ */ -#define bfa_asic_id_ct(devid) \ - ((devid) == PCI_DEVICE_ID_BROCADE_CT || \ - (devid) == PCI_DEVICE_ID_BROCADE_CT_FC) -#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid)) +#define bfa_asic_id_ct(device) \ + ((device) == PCI_DEVICE_ID_BROCADE_CT || \ + (device) == PCI_DEVICE_ID_BROCADE_CT_FC) +#define bfa_asic_id_ctc(device) (bfa_asic_id_ct(device)) enum bfa_mode { BFA_MODE_HBA = 1, diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 7ddd16f819f..7e5df90528f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -18,7 +18,6 @@ #ifndef __BFA_DEFS_MFG_COMM_H__ #define __BFA_DEFS_MFG_COMM_H__ -#include "cna.h" #include "bfa_defs.h" /** diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index f5a3d4e8207..9116324865c 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -274,8 +274,10 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ (__ioc)->asic_mode)) -#define bfa_ioc_isr_mode_set(__ioc, __msix) \ - ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) +#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \ + if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \ +} while (0) #define bfa_ioc_ownership_reset(__ioc) \ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 19654cc7aba..4e04c140c84 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -73,20 +73,6 @@ struct bfi_mhdr { **************************************************************************** */ -#define BFI_SGE_INLINE 1 -#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) - -/** - * SG Flags - */ -enum { - BFI_SGE_DATA = 0, /*!< data address, not last */ - BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */ - BFI_SGE_DATA_LAST = 3, /*!< data address, last */ - BFI_SGE_LINK = 2, /*!< link address */ - BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */ -}; - /** * DMA addresses */ @@ -97,33 +83,6 @@ union bfi_addr_u { } a32; }; -/** - * Scatter Gather Element - */ -struct bfi_sge { -#ifdef __BIGENDIAN - u32 flags:2, - rsvd:2, - sg_len:28; -#else - u32 sg_len:28, - rsvd:2, - 
flags:2; -#endif - union bfi_addr_u sga; -}; - -/** - * Scatter Gather Page - */ -#define BFI_SGPG_DATA_SGES 7 -#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1) -#define BFI_SGPG_RSVD_WD_LEN 8 -struct bfi_sgpg { - struct bfi_sge sges[BFI_SGPG_SGES_MAX]; - u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; -}; - /* * Large Message structure - 128 Bytes size Msgs */ @@ -131,11 +90,6 @@ struct bfi_sgpg { #define BFI_LMSG_PL_WSZ \ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) -struct bfi_msg { - struct bfi_mhdr mhdr; - u32 pl[BFI_LMSG_PL_WSZ]; -}; - /** * Mailbox message structure */ diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 2a587c5fdc2..3a6e7906149 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -10,12 +10,17 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ +/* + * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + */ #ifndef __BNA_H__ #define __BNA_H__ -#include "bfa_cs.h" +#include "bfa_defs.h" #include "bfa_ioc.h" -#include "cna.h" +#include "bfi_enet.h" #include "bna_types.h" extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; @@ -395,12 +400,8 @@ void bna_mod_init(struct bna *bna, struct bna_res_info *res_info); void bna_uninit(struct bna *bna); int bna_num_txq_set(struct bna *bna, int num_txq); int bna_num_rxp_set(struct bna *bna, int num_rxp); -void bna_stats_get(struct bna *bna); -void bna_get_perm_mac(struct bna *bna, u8 *mac); void bna_hw_stats_get(struct bna *bna); -/* APIs for Rx */ - /* APIs for RxF */ struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod); void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, @@ -521,11 +522,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); void bna_rx_vlanfilter_enable(struct bna_rx *rx); -void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config, - void (*cbfn)(struct bnad *, struct bna_rx *)); -void bna_rx_hds_disable(struct bna_rx *rx, - void (*cbfn)(struct bnad *, struct bna_rx *)); - /** * ENET */ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 68a275d66fc..26f5c5abfd1 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -167,13 +167,14 @@ bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, * Store only if not set earlier, since BNAD can override the HW * attributes */ - if (!ioceth->attr.num_txq) + if (!ioceth->attr.fw_query_complete) { ioceth->attr.num_txq = ntohl(rsp->max_cfg); - if (!ioceth->attr.num_rxp) ioceth->attr.num_rxp = ntohl(rsp->max_cfg); - ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac); - ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; - ioceth->attr.max_rit_size = ntohl(rsp->rit_size); + ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac); + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = ntohl(rsp->rit_size); + ioceth->attr.fw_query_complete = true; + } bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP); } @@ -1693,6 +1694,16 @@ static struct bfa_ioc_cbfn bna_ioceth_cbfn = { bna_cb_ioceth_reset }; +static void bna_attr_init(struct bna_ioceth *ioceth) +{ + ioceth->attr.num_txq = BFI_ENET_DEF_TXQ; + ioceth->attr.num_rxp = BFI_ENET_DEF_RXP; + ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM; + ioceth->attr.num_mcmac = 
BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ; + ioceth->attr.fw_query_complete = false; +} + static void bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, struct bna_res_info *res_info) @@ -1738,6 +1749,8 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, ioceth->stop_cbfn = NULL; ioceth->stop_cbarg = NULL; + bna_attr_init(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); } @@ -2036,7 +2049,8 @@ bna_uninit(struct bna *bna) int bna_num_txq_set(struct bna *bna, int num_txq) { - if (num_txq > 0 && (num_txq <= bna->ioceth.attr.num_txq)) { + if (bna->ioceth.attr.fw_query_complete && + (num_txq <= bna->ioceth.attr.num_txq)) { bna->ioceth.attr.num_txq = num_txq; return BNA_CB_SUCCESS; } @@ -2047,7 +2061,8 @@ bna_num_txq_set(struct bna *bna, int num_txq) int bna_num_rxp_set(struct bna *bna, int num_rxp) { - if (num_rxp > 0 && (num_rxp <= bna->ioceth.attr.num_rxp)) { + if (bna->ioceth.attr.fw_query_complete && + (num_rxp <= bna->ioceth.attr.num_rxp)) { bna->ioceth.attr.num_rxp = num_rxp; return BNA_CB_SUCCESS; } diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 07bb7928982..dde8a463b8d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -30,6 +30,10 @@ * SW imposed limits * */ +#define BFI_ENET_DEF_TXQ 1 +#define BFI_ENET_DEF_RXP 1 +#define BFI_ENET_DEF_UCAM 1 +#define BFI_ENET_DEF_RITSZ 1 #define BFI_ENET_MAX_MCAM 256 @@ -99,6 +103,7 @@ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \ (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \ } #define ct2_reg_addr_init(_bna, _pcidev) \ diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index 8a6da0c3cd8..242d7997ffb 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -21,7 +21,6 @@ #include "cna.h" #include "bna_hw_defs.h" #include "bfa_cee.h" -#include "bfi_enet.h" #include "bfa_msgq.h" /** @@ -324,6 +323,7 @@ struct bna_qpt { }; struct bna_attr { + bool fw_query_complete; int num_txq; int num_rxp; int num_ucmac; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 6ad4b477a4e..b7f96ab8b30 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -102,6 +102,28 @@ bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) } } +static u32 +bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, + u32 index, u32 depth, struct sk_buff *skb, u32 frag) +{ + int j; + array[index].skb = NULL; + + dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr), + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&array[index], dma_addr, 0); + BNA_QE_INDX_ADD(index, 1, depth); + + for (j = 0; j < frag; j++) { + dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), + skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE); + dma_unmap_addr_set(&array[index], dma_addr, 0); + BNA_QE_INDX_ADD(index, 1, depth); + } + + return index; +} + /* * Frees all pending Tx Bufs * At this point no activity is expected on the Q, @@ -115,39 +137,20 @@ bnad_free_all_txbufs(struct bnad *bnad, struct bnad_unmap_q *unmap_q = tcb->unmap_q; struct bnad_skb_unmap *unmap_array; struct sk_buff *skb = NULL; - int i; + int q; unmap_array = unmap_q->unmap_array; - unmap_cons = 
0; - while (unmap_cons < unmap_q->q_depth) { - skb = unmap_array[unmap_cons].skb; - if (!skb) { - unmap_cons++; + for (q = 0; q < unmap_q->q_depth; q++) { + skb = unmap_array[q].skb; + if (!skb) continue; - } - unmap_array[unmap_cons].skb = NULL; - - dma_unmap_single(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), skb_headlen(skb), - DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); - if (++unmap_cons >= unmap_q->q_depth) - break; + unmap_cons = q; + unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, + unmap_cons, unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - dma_unmap_page(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), - skb_shinfo(skb)->frags[i].size, - DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, - 0); - if (++unmap_cons >= unmap_q->q_depth) - break; - } dev_kfree_skb_any(skb); } } @@ -164,12 +167,11 @@ static u32 bnad_free_txbufs(struct bnad *bnad, struct bna_tcb *tcb) { - u32 sent_packets = 0, sent_bytes = 0; - u16 wis, unmap_cons, updated_hw_cons; + u32 unmap_cons, sent_packets = 0, sent_bytes = 0; + u16 wis, updated_hw_cons; struct bnad_unmap_q *unmap_q = tcb->unmap_q; struct bnad_skb_unmap *unmap_array; struct sk_buff *skb; - int i; /* * Just return if TX is stopped. This check is useful @@ -195,32 +197,14 @@ bnad_free_txbufs(struct bnad *bnad, while (wis) { skb = unmap_array[unmap_cons].skb; - unmap_array[unmap_cons].skb = NULL; - sent_packets++; sent_bytes += skb->len; wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); - dma_unmap_single(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), skb_headlen(skb), - DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); - BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); - - prefetch(&unmap_array[unmap_cons + 1]); - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - prefetch(&unmap_array[unmap_cons + 1]); - - dma_unmap_page(&bnad->pcidev->dev, - dma_unmap_addr(&unmap_array[unmap_cons], - dma_addr), - skb_shinfo(skb)->frags[i].size, - DMA_TO_DEVICE); - dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, - 0); - BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); - } + unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, + unmap_cons, unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); + dev_kfree_skb_any(skb); } @@ -383,14 +367,14 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range); while (to_alloc--) { - if (!wi_range) { + if (!wi_range) BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range); - } skb = netdev_alloc_skb_ip_align(bnad->netdev, rcb->rxq->buffer_size); if (unlikely(!skb)) { BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; goto finishing; } unmap_array[unmap_prod].skb = skb; @@ -535,16 +519,12 @@ next: BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); - if (likely(ccb)) { - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - bna_ib_ack(ccb->i_dbell, packets); - bnad_refill_rxq(bnad, ccb->rcb[0]); - if (ccb->rcb[1]) - bnad_refill_rxq(bnad, ccb->rcb[1]); - } else { - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - bna_ib_ack(ccb->i_dbell, 0); - } + if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) + bna_ib_ack_disable_irq(ccb->i_dbell, packets); + + bnad_refill_rxq(bnad, ccb->rcb[0]); + if (ccb->rcb[1]) + 
bnad_refill_rxq(bnad, ccb->rcb[1]); clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); @@ -552,37 +532,15 @@ next: } static void -bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) -{ - if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - return; - - bna_ib_coalescing_timer_set(ccb->i_dbell, 0); - bna_ib_ack(ccb->i_dbell, 0); -} - -static void -bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) -{ - unsigned long flags; - - /* Because of polling context */ - spin_lock_irqsave(&bnad->bna_lock, flags); - bnad_enable_rx_irq_unsafe(ccb); - spin_unlock_irqrestore(&bnad->bna_lock, flags); -} - -static void bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) { struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); struct napi_struct *napi = &rx_ctrl->napi; if (likely(napi_schedule_prep(napi))) { - bnad_disable_rx_irq(bnad, ccb); __napi_schedule(napi); + rx_ctrl->rx_schedule++; } - BNAD_UPDATE_CTR(bnad, netif_rx_schedule); } /* MSIX Rx Path Handler */ @@ -590,9 +548,11 @@ static irqreturn_t bnad_msix_rx(int irq, void *data) { struct bna_ccb *ccb = (struct bna_ccb *)data; - struct bnad *bnad = ccb->bnad; - bnad_netif_rx_schedule_poll(bnad, ccb); + if (ccb) { + ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++; + bnad_netif_rx_schedule_poll(ccb->bnad, ccb); + } return IRQ_HANDLED; } @@ -607,10 +567,11 @@ bnad_msix_mbox_handler(int irq, void *data) unsigned long flags; struct bnad *bnad = (struct bnad *)data; - if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) - return IRQ_HANDLED; - spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_HANDLED; + } bna_intr_status_get(&bnad->bna, intr_status); @@ -633,15 +594,18 @@ bnad_isr(int irq, void *data) struct bnad_rx_ctrl *rx_ctrl; struct bna_tcb *tcb = NULL; - if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); return IRQ_NONE; + } bna_intr_status_get(&bnad->bna, intr_status); - if (unlikely(!intr_status)) + if (unlikely(!intr_status)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); return IRQ_NONE; - - spin_lock_irqsave(&bnad->bna_lock, flags); + } if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) bna_mbox_handler(&bnad->bna, intr_status); @@ -1001,7 +965,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) mdelay(BNAD_TXRX_SYNC_MDELAY); - for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) { + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { rx_ctrl = &rx_info->rx_ctrl[i]; ccb = rx_ctrl->ccb; if (!ccb) @@ -1030,7 +994,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) int i; int j; - for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) { + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { rx_ctrl = &rx_info->rx_ctrl[i]; ccb = rx_ctrl->ccb; if (!ccb) @@ -1658,32 +1622,32 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) { struct bnad_rx_ctrl *rx_ctrl = container_of(napi, struct bnad_rx_ctrl, napi); - struct bna_ccb *ccb; - struct bnad *bnad; + struct bnad *bnad = rx_ctrl->bnad; int rcvd = 0; - ccb = rx_ctrl->ccb; - - bnad = ccb->bnad; + rx_ctrl->rx_poll_ctr++; if (!netif_carrier_ok(bnad->netdev)) goto poll_exit; - rcvd = bnad_poll_cq(bnad, ccb, budget); - if (rcvd == budget) + rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); + if (rcvd >= budget) return rcvd; poll_exit: - 
napi_complete((napi)); + napi_complete(napi); + + rx_ctrl->rx_complete++; - BNAD_UPDATE_CTR(bnad, netif_rx_complete); + if (rx_ctrl->ccb) + bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); - bnad_enable_rx_irq(bnad, ccb); return rcvd; } +#define BNAD_NAPI_POLL_QUOTA 64 static void -bnad_napi_enable(struct bnad *bnad, u32 rx_id) +bnad_napi_init(struct bnad *bnad, u32 rx_id) { struct bnad_rx_ctrl *rx_ctrl; int i; @@ -1691,9 +1655,20 @@ bnad_napi_enable(struct bnad *bnad, u32 rx_id) /* Initialize & enable NAPI */ for (i = 0; i < bnad->num_rxp_per_rx; i++) { rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; - netif_napi_add(bnad->netdev, &rx_ctrl->napi, - bnad_napi_poll_rx, 64); + bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); + } +} + +static void +bnad_napi_enable(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_ctrl *rx_ctrl; + int i; + + /* Initialize & enable NAPI */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) { + rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; napi_enable(&rx_ctrl->napi); } @@ -1732,6 +1707,9 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) bnad_tx_msix_unregister(bnad, tx_info, bnad->num_txq_per_tx); + if (0 == tx_id) + tasklet_kill(&bnad->tx_free_tasklet); + spin_lock_irqsave(&bnad->bna_lock, flags); bna_tx_destroy(tx_info->tx); spin_unlock_irqrestore(&bnad->bna_lock, flags); @@ -1739,9 +1717,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) tx_info->tx = NULL; tx_info->tx_id = 0; - if (0 == tx_id) - tasklet_kill(&bnad->tx_free_tasklet); - bnad_tx_res_free(bnad, res_info); } @@ -1852,6 +1827,16 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; } +static void +bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + int i; + + for (i = 0; i < bnad->num_rxp_per_rx; i++) + rx_info->rx_ctrl[i].bnad = bnad; +} + /* Called with mutex_lock(&bnad->conf_mutex) held */ void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) @@ -1860,23 +1845,23 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; unsigned long flags; - int dim_timer_del = 0; + int to_del = 0; if (!rx_info->rx) return; if (0 == rx_id) { spin_lock_irqsave(&bnad->bna_lock, flags); - dim_timer_del = bnad_dim_timer_running(bnad); - if (dim_timer_del) + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + to_del = 1; + } spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (dim_timer_del) + if (to_del) del_timer_sync(&bnad->dim_timer); } - bnad_napi_disable(bnad, rx_id); - init_completion(&bnad->bnad_completions.rx_comp); spin_lock_irqsave(&bnad->bna_lock, flags); bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); @@ -1886,11 +1871,14 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); + bnad_napi_disable(bnad, rx_id); + spin_lock_irqsave(&bnad->bna_lock, flags); bna_rx_destroy(rx_info->rx); spin_unlock_irqrestore(&bnad->bna_lock, flags); rx_info->rx = NULL; + rx_info->rx_id = 0; bnad_rx_res_free(bnad, res_info); } @@ -1939,15 +1927,25 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) if (err) return err; + bnad_rx_ctrl_init(bnad, rx_id); + /* Ask BNA to create one Rx object, supplying required resources */ spin_lock_irqsave(&bnad->bna_lock, flags); rx = 
bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, rx_info); spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (!rx) + if (!rx) { + err = -ENOMEM; goto err_return; + } rx_info->rx = rx; + /* + * Init NAPI, so that state is set to NAPI_STATE_SCHED, + * so that IRQ handler cannot schedule NAPI at this point. + */ + bnad_napi_init(bnad, rx_id); + /* Register ISR for the Rx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { err = bnad_rx_msix_register(bnad, rx_info, rx_id, @@ -1956,9 +1954,6 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) goto err_return; } - /* Enable NAPI */ - bnad_napi_enable(bnad, rx_id); - spin_lock_irqsave(&bnad->bna_lock, flags); if (0 == rx_id) { /* Set up Dynamic Interrupt Moderation Vector */ @@ -1975,6 +1970,9 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) bna_rx_enable(rx); spin_unlock_irqrestore(&bnad->bna_lock, flags); + /* Enable scheduling of NAPI */ + bnad_napi_enable(bnad, rx_id); + return 0; err_return: @@ -2014,7 +2012,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad) /* * Called with bnad->bna_lock held */ -static int +int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) { int ret; @@ -2034,7 +2032,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) } /* Should be called with conf_lock held */ -static int +int bnad_enable_default_bcast(struct bnad *bnad) { struct bnad_rx_info *rx_info = &bnad->rx_info[0]; @@ -2059,15 +2057,13 @@ bnad_enable_default_bcast(struct bnad *bnad) return 0; } -/* Called with bnad_conf_lock() held */ -static void +/* Called with mutex_lock(&bnad->conf_mutex) held */ +void bnad_restore_vlans(struct bnad *bnad, u32 rx_id) { u16 vid; unsigned long flags; - BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX)); - for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { spin_lock_irqsave(&bnad->bna_lock, flags); bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); @@ -2176,9 +2172,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) { int err; - /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */ - BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 || - skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)); if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) { @@ -2205,7 +2198,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) } else { struct ipv6hdr *ipv6h = ipv6_hdr(skb); - BUG_ON(!(skb->protocol == htons(ETH_P_IPV6))); ipv6h->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, @@ -2227,7 +2219,7 @@ bnad_q_num_init(struct bnad *bnad) int rxps; rxps = min((uint)num_online_cpus(), - (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX)); + (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); if (!(bnad->cfg_flags & BNAD_CF_MSIX)) rxps = 1; /* INTx */ @@ -2356,15 +2348,16 @@ bnad_enable_msix(struct bnad *bnad) ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); if (ret > 0) { /* Not enough MSI-X vectors. 
*/ + pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", + ret, bnad->msix_num); spin_lock_irqsave(&bnad->bna_lock, flags); /* ret = #of vectors that we got */ - bnad_q_num_adjust(bnad, ret, 0); + bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, + (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); spin_unlock_irqrestore(&bnad->bna_lock, flags); - bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) - + (bnad->num_rx - * bnad->num_rxp_per_rx) + + bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + BNAD_MAILBOX_MSIX_VECTORS; if (bnad->msix_num > ret) @@ -2385,6 +2378,7 @@ bnad_enable_msix(struct bnad *bnad) return; intx_mode: + pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); kfree(bnad->msix_table); bnad->msix_table = NULL; @@ -2521,30 +2515,44 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) u32 unmap_prod, wis, wis_used, wi_range; u32 vectors, vect_id, i, acked; int err; + unsigned int len; + u32 gso_size; struct bnad_unmap_q *unmap_q = tcb->unmap_q; dma_addr_t dma_addr; struct bna_txq_entry *txqent; u16 flags; - if (unlikely - (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) { + if (unlikely(skb->len <= ETH_HLEN)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_too_short); + return NETDEV_TX_OK; + } + if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) { dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long); + return NETDEV_TX_OK; + } + if (unlikely(skb_headlen(skb) == 0)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); return NETDEV_TX_OK; } /* * Takes care of the Tx that is scheduled between clearing the flag - * and the netif_stop_all_queue() call. + * and the netif_tx_stop_all_queues() call. */ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_stopping); return NETDEV_TX_OK; } vectors = 1 + skb_shinfo(skb)->nr_frags; - if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { + if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); return NETDEV_TX_OK; } wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ @@ -2582,18 +2590,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) } unmap_prod = unmap_q->producer_index; - wis_used = 1; - vect_id = 0; flags = 0; txq_prod = tcb->producer_index; BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); - BUG_ON(!(wi_range <= tcb->q_depth)); txqent->hdr.wi.reserved = 0; txqent->hdr.wi.num_vectors = vectors; - txqent->hdr.wi.opcode = - htons((skb_is_gso(skb) ? 
BNA_TXQ_WI_SEND_LSO : - BNA_TXQ_WI_SEND)); if (vlan_tx_tag_present(skb)) { vlan_tag = (u16) vlan_tx_tag_get(skb); @@ -2608,62 +2610,93 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) txqent->hdr.wi.vlan_tag = htons(vlan_tag); if (skb_is_gso(skb)) { + gso_size = skb_shinfo(skb)->gso_size; + + if (unlikely(gso_size > netdev->mtu)) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); + return NETDEV_TX_OK; + } + if (unlikely((gso_size + skb_transport_offset(skb) + + tcp_hdrlen(skb)) >= skb->len)) { + txqent->hdr.wi.opcode = + __constant_htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); + } else { + txqent->hdr.wi.opcode = + __constant_htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.lso_mss = htons(gso_size); + } + err = bnad_tso_prepare(bnad, skb); - if (err) { + if (unlikely(err)) { dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); return NETDEV_TX_OK; } - txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb)); flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); txqent->hdr.wi.l4_hdr_size_n_offset = htons(BNA_TXQ_WI_L4_HDR_N_OFFSET (tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 proto = 0; - + } else { + txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; - if (skb->protocol == htons(ETH_P_IP)) - proto = ip_hdr(skb)->protocol; - else if (skb->protocol == htons(ETH_P_IPV6)) { - /* nexthdr may not be TCP immediately. */ - proto = ipv6_hdr(skb)->nexthdr; + if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); + return NETDEV_TX_OK; } - if (proto == IPPROTO_TCP) { - flags |= BNA_TXQ_WI_CF_TCP_CKSUM; - txqent->hdr.wi.l4_hdr_size_n_offset = - htons(BNA_TXQ_WI_L4_HDR_N_OFFSET - (0, skb_transport_offset(skb))); - BNAD_UPDATE_CTR(bnad, tcpcsum_offload); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 proto = 0; - BUG_ON(!(skb_headlen(skb) >= - skb_transport_offset(skb) + tcp_hdrlen(skb))); - - } else if (proto == IPPROTO_UDP) { - flags |= BNA_TXQ_WI_CF_UDP_CKSUM; - txqent->hdr.wi.l4_hdr_size_n_offset = - htons(BNA_TXQ_WI_L4_HDR_N_OFFSET - (0, skb_transport_offset(skb))); - - BNAD_UPDATE_CTR(bnad, udpcsum_offload); + if (skb->protocol == __constant_htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == + __constant_htons(ETH_P_IPV6)) { + /* nexthdr may not be TCP immediately. 
*/ + proto = ipv6_hdr(skb)->nexthdr; + } + if (proto == IPPROTO_TCP) { + flags |= BNA_TXQ_WI_CF_TCP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, tcpcsum_offload); + + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + tcp_hdrlen(skb))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); + return NETDEV_TX_OK; + } - BUG_ON(!(skb_headlen(skb) >= - skb_transport_offset(skb) + - sizeof(struct udphdr))); - } else { - err = skb_checksum_help(skb); - BNAD_UPDATE_CTR(bnad, csum_help); - if (err) { + } else if (proto == IPPROTO_UDP) { + flags |= BNA_TXQ_WI_CF_UDP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, udpcsum_offload); + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + + sizeof(struct udphdr))) { + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); + return NETDEV_TX_OK; + } + } else { dev_kfree_skb(skb); - BNAD_UPDATE_CTR(bnad, csum_help_err); + BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); return NETDEV_TX_OK; } + } else { + txqent->hdr.wi.l4_hdr_size_n_offset = 0; } - } else { - txqent->hdr.wi.lso_mss = 0; - txqent->hdr.wi.l4_hdr_size_n_offset = 0; } txqent->hdr.wi.flags = htons(flags); @@ -2671,20 +2704,37 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) txqent->hdr.wi.frame_length = htonl(skb->len); unmap_q->unmap_array[unmap_prod].skb = skb; - BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); - txqent->vector[vect_id].length = htons(skb_headlen(skb)); + len = skb_headlen(skb); + txqent->vector[0].length = htons(len); dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, dma_addr); - BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); + vect_id = 0; + wis_used = 1; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; u16 size = frag->size; + if (unlikely(size == 0)) { + unmap_prod = unmap_q->producer_index; + + unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, + unmap_q->unmap_array, + unmap_prod, unmap_q->q_depth, skb, + i); + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); + return NETDEV_TX_OK; + } + + len += size; + if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { vect_id = 0; if (--wi_range) @@ -2695,22 +2745,34 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) wis_used = 0; BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); - BUG_ON(!(wi_range <= tcb->q_depth)); } wis_used++; - txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); + txqent->hdr.wi_ext.opcode = + __constant_htons(BNA_TXQ_WI_EXTENSION); } BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); txqent->vector[vect_id].length = htons(size); - dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page, - frag->page_offset, size, DMA_TO_DEVICE); + dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, + 0, size, DMA_TO_DEVICE); dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, dma_addr); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); } + if (unlikely(len != skb->len)) { + unmap_prod = unmap_q->producer_index; + + unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, + 
unmap_q->unmap_array, unmap_prod, + unmap_q->q_depth, skb, + skb_shinfo(skb)->nr_frags); + dev_kfree_skb(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); + return NETDEV_TX_OK; + } + unmap_q->producer_index = unmap_prod; BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); tcb->producer_index = txq_prod; @@ -2721,6 +2783,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; bna_txq_prod_indx_doorbell(tcb); + smp_mb(); if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) tasklet_schedule(&bnad->tx_free_tasklet); @@ -2748,7 +2811,7 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) return stats; } -static void +void bnad_set_rx_mode(struct net_device *netdev) { struct bnad *bnad = netdev_priv(netdev); @@ -2787,6 +2850,9 @@ bnad_set_rx_mode(struct net_device *netdev) } } + if (bnad->rx_info[0].rx == NULL) + goto unlock; + bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); if (!netdev_mc_empty(netdev)) { @@ -2933,18 +2999,21 @@ bnad_netpoll(struct net_device *netdev) bnad_isr(bnad->pcidev->irq, netdev); bna_intx_enable(&bnad->bna, curr_mask); } else { + /* + * Tx processing may happen in sending context, so no need + * to explicitly process completions here + */ + + /* Rx processing */ for (i = 0; i < bnad->num_rx; i++) { rx_info = &bnad->rx_info[i]; if (!rx_info->rx) continue; for (j = 0; j < bnad->num_rxp_per_rx; j++) { rx_ctrl = &rx_info->rx_ctrl[j]; - if (rx_ctrl->ccb) { - bnad_disable_rx_irq(bnad, - rx_ctrl->ccb); + if (rx_ctrl->ccb) bnad_netif_rx_schedule_poll(bnad, rx_ctrl->ccb); - } } } } @@ -3126,7 +3195,7 @@ static int __devinit bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id) { - bool using_dac = false; + bool using_dac; int err; struct bnad *bnad; struct bna *bna; @@ -3249,12 +3318,19 @@ bnad_pci_probe(struct pci_dev *pdev, bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) err = -EIO; } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (err) + goto disable_ioceth; + + spin_lock_irqsave(&bnad->bna_lock, flags); bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); spin_unlock_irqrestore(&bnad->bna_lock, flags); err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); - if (err) + if (err) { + err = -EIO; goto disable_ioceth; + } spin_lock_irqsave(&bnad->bna_lock, flags); bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); @@ -3266,6 +3342,8 @@ bnad_pci_probe(struct pci_dev *pdev, bnad_set_netdev_perm_addr(bnad); spin_unlock_irqrestore(&bnad->bna_lock, flags); + mutex_unlock(&bnad->conf_mutex); + /* Finally, reguister with net_device layer */ err = register_netdev(netdev); if (err) { @@ -3274,6 +3352,8 @@ bnad_pci_probe(struct pci_dev *pdev, } set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); + return 0; + probe_success: mutex_unlock(&bnad->conf_mutex); return 0; diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 5b5451edf49..1c9328d564d 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -38,12 +38,12 @@ #define BNAD_TXQ_DEPTH 2048 #define BNAD_RXQ_DEPTH 2048 -#define BNAD_MAX_TXS 1 +#define BNAD_MAX_TX 1 #define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */ #define BNAD_TXQ_NUM 1 -#define BNAD_MAX_RXS 1 -#define BNAD_MAX_RXPS_PER_RX 16 +#define BNAD_MAX_RX 1 +#define BNAD_MAX_RXP_PER_RX 16 #define BNAD_MAX_RXQ_PER_RXP 2 /* @@ -53,21 +53,25 @@ */ struct bnad_rx_ctrl { struct bna_ccb *ccb; + struct bnad *bnad; unsigned long flags; struct napi_struct napi; + u64 
rx_intr_ctr; + u64 rx_poll_ctr; + u64 rx_schedule; + u64 rx_keep_poll; + u64 rx_complete; }; #define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC -#define BNAD_GET_TX_ID(_skb) (0) - /* * GLOBAL #defines (CONSTANTS) */ #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.0.2.0" +#define BNAD_VERSION "3.0.2.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 @@ -82,6 +86,10 @@ struct bnad_rx_ctrl { #define BNAD_MAX_Q_DEPTH 0x10000 #define BNAD_MIN_Q_DEPTH 0x200 +#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq) +/* keeping MAX TX and RX Q depth equal */ +#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH + #define BNAD_JUMBO_MTU 9000 #define BNAD_NETIF_WAKE_THRESHOLD 8 @@ -146,16 +154,26 @@ struct bnad_drv_stats { u64 tcpcsum_offload; u64 udpcsum_offload; u64 csum_help; - u64 csum_help_err; + u64 tx_skb_too_short; + u64 tx_skb_stopping; + u64 tx_skb_max_vectors; + u64 tx_skb_mss_too_long; + u64 tx_skb_tso_too_short; + u64 tx_skb_tso_prepare; + u64 tx_skb_non_tso_too_long; + u64 tx_skb_tcp_hdr; + u64 tx_skb_udp_hdr; + u64 tx_skb_csum_err; + u64 tx_skb_headlen_too_long; + u64 tx_skb_headlen_zero; + u64 tx_skb_frag_zero; + u64 tx_skb_len_mismatch; u64 hw_stats_updates; - u64 netif_rx_schedule; - u64 netif_rx_complete; u64 netif_rx_dropped; u64 link_toggle; u64 cee_toggle; - u64 cee_up; u64 rxp_info_alloc_failed; u64 mbox_intr_disabled; @@ -190,7 +208,7 @@ struct bnad_tx_info { struct bnad_rx_info { struct bna_rx *rx; /* 1:1 between rx_info & rx */ - struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX]; + struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; u32 rx_id; } ____cacheline_aligned; @@ -234,8 +252,8 @@ struct bnad { struct net_device *netdev; /* Data path */ - struct bnad_tx_info tx_info[BNAD_MAX_TXS]; - struct bnad_rx_info rx_info[BNAD_MAX_RXS]; + struct bnad_tx_info tx_info[BNAD_MAX_TX]; + struct bnad_rx_info rx_info[BNAD_MAX_RX]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; /* @@ -255,8 +273,8 @@ struct bnad { u8 tx_coalescing_timeo; u8 rx_coalescing_timeo; - struct bna_rx_config rx_config[BNAD_MAX_RXS]; - struct bna_tx_config tx_config[BNAD_MAX_TXS]; + struct bna_rx_config rx_config[BNAD_MAX_RX]; + struct bna_tx_config tx_config[BNAD_MAX_TX]; void __iomem *bar0; /* BAR0 address */ @@ -283,8 +301,8 @@ struct bnad { /* Control path resources, memory & irq */ struct bna_res_info res_info[BNA_RES_T_MAX]; struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX]; - struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS]; - struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS]; + struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX]; + struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX]; struct bnad_completion bnad_completions; @@ -314,6 +332,12 @@ extern u32 bnad_rxqs_per_cq; */ extern u32 *cna_get_firmware_buf(struct pci_dev *pdev); /* Netdev entry point prototypes */ +extern void bnad_set_rx_mode(struct net_device *netdev); +extern struct net_device_stats *bnad_get_netdev_stats( + struct net_device *netdev); +extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); +extern int bnad_enable_default_bcast(struct bnad *bnad); +extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); extern void bnad_set_ethtool_ops(struct net_device *netdev); /* Configuration & setup */ @@ -345,15 +369,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, #define bnad_enable_rx_irq_unsafe(_ccb) \ { \ - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\ + if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\ 
bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ (_ccb)->rx_coalescing_timeo); \ bna_ib_ack((_ccb)->i_dbell, 0); \ } \ } -#define bnad_dim_timer_running(_bnad) \ - (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \ - (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags)))) - #endif /* __BNAD_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 1c19dcea83c..48422244397 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -75,14 +75,25 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tcpcsum_offload", "udpcsum_offload", "csum_help", - "csum_help_err", + "tx_skb_too_short", + "tx_skb_stopping", + "tx_skb_max_vectors", + "tx_skb_mss_too_long", + "tx_skb_tso_too_short", + "tx_skb_tso_prepare", + "tx_skb_non_tso_too_long", + "tx_skb_tcp_hdr", + "tx_skb_udp_hdr", + "tx_skb_csum_err", + "tx_skb_headlen_too_long", + "tx_skb_headlen_zero", + "tx_skb_frag_zero", + "tx_skb_len_mismatch", "hw_stats_updates", - "netif_rx_schedule", - "netif_rx_complete", "netif_rx_dropped", "link_toggle", - "cee_up", + "cee_toggle", "rxp_info_alloc_failed", "mbox_intr_disabled", @@ -201,6 +212,20 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "rad_rx_bcast_vlan", "rad_rx_drops", + "rlb_rad_rx_frames", + "rlb_rad_rx_octets", + "rlb_rad_rx_vlan_frames", + "rlb_rad_rx_ucast", + "rlb_rad_rx_ucast_octets", + "rlb_rad_rx_ucast_vlan", + "rlb_rad_rx_mcast", + "rlb_rad_rx_mcast_octets", + "rlb_rad_rx_mcast_vlan", + "rlb_rad_rx_bcast", + "rlb_rad_rx_bcast_octets", + "rlb_rad_rx_bcast_vlan", + "rlb_rad_rx_drops", + "fc_rx_ucast_octets", "fc_rx_ucast", "fc_rx_ucast_vlan", @@ -321,7 +346,7 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; - int dim_timer_del = 0; + int to_del = 0; if (coalesce->rx_coalesce_usecs == 0 || coalesce->rx_coalesce_usecs > @@ -348,14 +373,17 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) } else { if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; - dim_timer_del = bnad_dim_timer_running(bnad); - if (dim_timer_del) { + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags)) { clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - del_timer_sync(&bnad->dim_timer); - spin_lock_irqsave(&bnad->bna_lock, flags); + to_del = 1; } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); bnad_rx_coalescing_timeo_set(bnad); } } @@ -390,10 +418,10 @@ bnad_get_ringparam(struct net_device *netdev, { struct bnad *bnad = netdev_priv(netdev); - ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq; + ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH; ringparam->rx_mini_max_pending = 0; ringparam->rx_jumbo_max_pending = 0; - ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH; + ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH; ringparam->rx_pending = bnad->rxq_depth; ringparam->rx_mini_max_pending = 0; @@ -407,6 +435,7 @@ bnad_set_ringparam(struct net_device *netdev, { int i, current_err, err = 0; struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; mutex_lock(&bnad->conf_mutex); if (ringparam->rx_pending == bnad->rxq_depth && @@ -416,13 +445,13 @@ bnad_set_ringparam(struct net_device 
*netdev, } if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || - ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq || + ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH || !BNA_POWER_OF_2(ringparam->rx_pending)) { mutex_unlock(&bnad->conf_mutex); return -EINVAL; } if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || - ringparam->tx_pending > BNAD_MAX_Q_DEPTH || + ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH || !BNA_POWER_OF_2(ringparam->tx_pending)) { mutex_unlock(&bnad->conf_mutex); return -EINVAL; @@ -430,6 +459,11 @@ bnad_set_ringparam(struct net_device *netdev, if (ringparam->rx_pending != bnad->rxq_depth) { bnad->rxq_depth = ringparam->rx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + for (i = 0; i < bnad->num_rx; i++) { if (!bnad->rx_info[i].rx) continue; @@ -437,10 +471,26 @@ bnad_set_ringparam(struct net_device *netdev, current_err = bnad_setup_rx(bnad, i); if (current_err && !err) err = current_err; + if (!err) + bnad_restore_vlans(bnad, i); + } + + if (!err && bnad->rx_info[0].rx) { + /* restore rx configuration */ + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_set_rx_mode(netdev); } } if (ringparam->tx_pending != bnad->txq_depth) { bnad->txq_depth = ringparam->tx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + for (i = 0; i < bnad->num_tx; i++) { if (!bnad->tx_info[i].tx) continue; @@ -578,6 +628,16 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) sprintf(string, "cq%d_hw_producer_index", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_intr", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_schedule", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_keep_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_complete", q_num); + string += ETH_GSTRING_LEN; q_num++; } } @@ -660,7 +720,7 @@ static int bnad_get_stats_count_locked(struct net_device *netdev) { struct bnad *bnad = netdev_priv(netdev); - int i, j, count, rxf_active_num = 0, txf_active_num = 0; + int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0; u32 bmap; bmap = bna_tx_rid_mask(&bnad->bna); @@ -718,6 +778,17 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) buf[bi++] = 0; /* ccb->consumer_index */ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. ccb->hw_producer_index); + + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_intr_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_poll_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_schedule; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_keep_poll; + buf[bi++] = bnad->rx_info[i]. 
+ rx_ctrl[j].rx_complete; } } for (i = 0; i < bnad->num_rx; i++) { diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 50fce15feac..cb4874210aa 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -21,21 +21,18 @@ #include <linux/kernel.h> #include <linux/types.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/timer.h> #include <linux/interrupt.h> +#include <linux/if_vlan.h> #include <linux/if_ether.h> -#include <asm/page.h> -#include <asm/io.h> -#include <asm/string.h> - -#include <linux/list.h> #define bfa_sm_fault(__event) do { \ - pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \ - __event); \ + pr_err("SM Assertion failure: %s: %d: event = %d\n", \ + __FILE__, __LINE__, __event); \ } while (0) extern char bfa_version[]; diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index c00e706ab58..98849a1fc74 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -25,6 +25,7 @@ if NET_ATMEL config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" depends on ARM && ARCH_AT91RM9200 + select NET_CORE select MII ---help--- If you wish to compile a kernel for the AT91RM9200 and enable diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index e0cacf66291..e9386ef524a 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -21,6 +21,7 @@ if NET_VENDOR_CIRRUS config EP93XX_ETH tristate "EP93xx Ethernet support" depends on ARM && ARCH_EP93XX + select NET_CORE select MII help This is a driver for the ethernet hardware included in EP93xx CPUs. diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c751c25d301..19c9272b8f1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -591,9 +591,9 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic, for (frag = skb_shinfo(skb)->frags; len_left; frag++) { len_left -= frag->size; enic_queue_wq_desc_cont(wq, skb, - pci_map_page(enic->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE), + skb_frag_dma_map(&enic->pdev->dev, + frag, 0, frag->size, + PCI_DMA_TODEVICE), frag->size, (len_left == 0), /* EOP? */ loopback); @@ -705,14 +705,14 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic, for (frag = skb_shinfo(skb)->frags; len_left; frag++) { len_left -= frag->size; frag_len_left = frag->size; - offset = frag->page_offset; + offset = 0; while (frag_len_left) { len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); - dma_addr = pci_map_page(enic->pdev, frag->page, - offset, len, - PCI_DMA_TODEVICE); + dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, + offset, len, + PCI_DMA_TODEVICE); enic_queue_wq_desc_cont(wq, skb, dma_addr, len, diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 73c5d2080f2..972b62b3183 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -6,6 +6,7 @@ config DM9000 tristate "DM9000 support" depends on ARM || BLACKFIN || MIPS select CRC32 + select NET_CORE select MII ---help--- Support for DM9000 chipset. 
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index f6af772b12c..1203be0436e 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -125,6 +125,7 @@ config WINBOND_840 tristate "Winbond W89c840 Ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver is for the Winbond W89c840 chip. It also works with diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index 84a28a66816..b5afe218c31 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -66,6 +66,7 @@ config SUNDANCE tristate "Sundance Alta support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver is for the Sundance "Alta" chip. diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3d55b4767ae..2b7d1ba1e13 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -638,8 +638,8 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - busaddr = dma_map_page(dev, frag->page, frag->page_offset, - frag->size, DMA_TO_DEVICE); + busaddr = skb_frag_dma_map(dev, frag, 0, + frag->size, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; wrb = queue_head_node(txq); @@ -1066,7 +1066,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, skb->tail += curr_frag_len; } else { skb_shinfo(skb)->nr_frags = 1; - skb_shinfo(skb)->frags[0].page = page_info->page; + skb_frag_set_page(skb, 0, page_info->page); skb_shinfo(skb)->frags[0].page_offset = page_info->page_offset + hdr_len; skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len; @@ -1091,7 +1091,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, if (page_info->page_offset == 0) { /* Fresh page */ j++; - skb_shinfo(skb)->frags[j].page = page_info->page; + skb_frag_set_page(skb, j, page_info->page); skb_shinfo(skb)->frags[j].page_offset = page_info->page_offset; skb_shinfo(skb)->frags[j].size = 0; @@ -1173,7 +1173,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, if (i == 0 || page_info->page_offset == 0) { /* First frag or Fresh page */ j++; - skb_shinfo(skb)->frags[j].page = page_info->page; + skb_frag_set_page(skb, j, page_info->page); skb_shinfo(skb)->frags[j].page_offset = page_info->page_offset; skb_shinfo(skb)->frags[j].size = 0; diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index 5918c689169..b8974b9e3b4 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -21,6 +21,7 @@ if NET_VENDOR_FARADAY config FTMAC100 tristate "Faraday FTMAC100 10/100 Ethernet support" depends on ARM + select NET_CORE select MII ---help--- This driver supports the FTMAC100 10/100 Ethernet controller diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index be92229f2c2..268414d9f2c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -1,6 +1,7 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) + select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/freescale/gianfar.c 
b/drivers/net/ethernet/freescale/gianfar.c index 81d409d08c9..83199fd0d62 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2140,11 +2140,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (i == nr_frags - 1) lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - bufaddr = dma_map_page(&priv->ofdev->dev, - skb_shinfo(skb)->frags[i].page, - skb_shinfo(skb)->frags[i].page_offset, - length, - DMA_TO_DEVICE); + bufaddr = skb_frag_dma_map(&priv->ofdev->dev, + &skb_shinfo(skb)->frags[i], + 0, + length, + DMA_TO_DEVICE); /* set the TxBD length and buffer pointer */ txbdp->bufPtr = bufaddr; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index bba1ffcd92d..8cca4a62b39 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1002,9 +1002,8 @@ retry_bounce: unsigned long dma_addr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, - frag->page_offset, frag->size, - DMA_TO_DEVICE); + dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, + frag->size, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) goto map_failed_frags; diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig index e8882227626..3aff81d7989 100644 --- a/drivers/net/ethernet/icplus/Kconfig +++ b/drivers/net/ethernet/icplus/Kconfig @@ -5,6 +5,7 @@ config IP1000 tristate "IP1000 Gigabit Ethernet support" depends on PCI && EXPERIMENTAL + select NET_CORE select MII ---help--- This driver supports IP1000 gigabit Ethernet cards. diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 4a98e83812b..61029dc7fa6 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -21,6 +21,7 @@ if NET_VENDOR_INTEL config E100 tristate "Intel(R) PRO/100+ support" depends on PCI + select NET_CORE select MII ---help--- This driver supports Intel(R) PRO/100 family of adapters. diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 4a32c15524c..27f586afcc3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2911,9 +2911,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter, frag = &skb_shinfo(skb)->frags[f]; len = frag->size; - offset = frag->page_offset; + offset = 0; while (len) { + unsigned long bufend; i++; if (unlikely(i == tx_ring->count)) i = 0; @@ -2927,18 +2928,19 @@ static int e1000_tx_map(struct e1000_adapter *adapter, /* Workaround for potential 82544 hang in PCI-X. * Avoid terminating buffers within evenly-aligned * dwords. 
*/ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; if (unlikely(adapter->pcix_82544 && - !((unsigned long)(page_to_phys(frag->page) + offset - + size - 1) & 4) && - size > 4)) + !(bufend & 4) && + size > 4)) size -= 4; buffer_info->length = size; buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; - buffer_info->dma = dma_map_page(&pdev->dev, frag->page, - offset, size, - DMA_TO_DEVICE); + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; buffer_info->next_to_watch = i; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 4f669995623..78c5d21fa34 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4677,7 +4677,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, frag = &skb_shinfo(skb)->frags[f]; len = frag->size; - offset = frag->page_offset; + offset = 0; while (len) { i++; @@ -4690,9 +4690,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info->length = size; buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - buffer_info->dma = dma_map_page(&pdev->dev, frag->page, - offset, size, - DMA_TO_DEVICE); + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 80160849740..3cb1bc96bf7 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4174,10 +4174,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->mapped_as_page = true; - buffer_info->dma = dma_map_page(dev, - frag->page, - frag->page_offset, - len, + buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, buffer_info->dma)) goto dma_error; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index a6bdb3c744f..b3d760b08a5 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2061,10 +2061,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->mapped_as_page = true; - buffer_info->dma = dma_map_page(&pdev->dev, - frag->page, - frag->page_offset, - len, + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index b8ef2c0fc5d..c8b9c9028bc 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1341,7 +1341,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, frag = &skb_shinfo(skb)->frags[f]; len = frag->size; - offset = frag->page_offset; + offset = 0; while (len) { i++; @@ -1361,8 +1361,8 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = - dma_map_page(&pdev->dev, frag->page, - offset, size, DMA_TO_DEVICE); + 
skb_frag_dma_map(&pdev->dev, frag, offset, size, + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; buffer_info->next_to_watch = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a30f8266df0..d20e8040d85 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6494,8 +6494,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, offset = 0; tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE; - dma = dma_map_page(dev, frag->page, frag->page_offset, - size, DMA_TO_DEVICE); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto dma_error; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 98963970206..d72905b77ab 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2918,18 +2918,16 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, frag = &skb_shinfo(skb)->frags[f]; len = min((unsigned int)frag->size, total); - offset = frag->page_offset; + offset = 0; while (len) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; - tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, - frag->page, - offset, - size, - DMA_TO_DEVICE); + tx_buffer_info->dma = + skb_frag_dma_map(&adapter->pdev->dev, frag, + offset, size, DMA_TO_DEVICE); tx_buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) goto dma_error; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index a869ee47dde..48a0a23f342 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1928,8 +1928,9 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page, - frag->page_offset, frag->size, hidma); + jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + skb_frag_page(frag), + frag->page_offset, frag->size, hidma); } len = skb_is_nonlinear(skb) ? 
skb_headlen(skb) : skb->len; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1e2c9f072bf..7325737fe93 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -752,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = this_frag->size; - desc->buf_ptr = dma_map_page(mp->dev->dev.parent, - this_frag->page, - this_frag->page_offset, - this_frag->size, DMA_TO_DEVICE); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, + this_frag->size, + DMA_TO_DEVICE); } } diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index bd090dbe3ad..d10c2e15f4e 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -22,6 +22,7 @@ if NET_VENDOR_MICREL config ARM_KS8695_ETHER tristate "KS8695 Ethernet support" depends on ARM && ARCH_KS8695 + select NET_CORE select MII ---help--- If you wish to compile a kernel for the KS8695 and want to @@ -38,6 +39,7 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI + select NET_CORE select MII select CRC32 ---help--- @@ -46,6 +48,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + select NET_CORE select MII ---help--- This platform driver is for Micrel KS8851 Address/data bus @@ -54,6 +57,7 @@ config KS8851_MLL config KSZ884X_PCI tristate "Micrel KSZ8841/2 PCI" depends on PCI + select NET_CORE select MII select CRC32 ---help--- diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 27418d31a09..710c4aead14 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4704,8 +4704,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) dma_buf->dma = pci_map_single( hw_priv->pdev, - page_address(this_frag->page) + - this_frag->page_offset, + skb_frag_address(this_frag), dma_buf->len, PCI_DMA_TODEVICE); set_tx_buf(desc, dma_buf->dma); diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 1a1e20e97a2..e0895e40f10 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1160,9 +1160,8 @@ again: if (!nr_frags) break; - buf = pci_map_page(dev->pci_dev, frag->page, - frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0, + frag->size, PCI_DMA_TODEVICE); dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n", (long long)buf, (long) page_to_pfn(frag->page), frag->page_offset); diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig index 01182b55947..334c1718309 100644 --- a/drivers/net/ethernet/nuvoton/Kconfig +++ b/drivers/net/ethernet/nuvoton/Kconfig @@ -22,6 +22,7 @@ config W90P910_ETH tristate "Nuvoton w90p910 Ethernet support" depends on ARM && ARCH_W90X900 select PHYLIB + select NET_CORE select MII ---help--- Say Y here if you want to use built-in Ethernet ports diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 98bb64bc24d..4e39b8c0439 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2146,8 +2146,11 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, - PCI_DMA_TODEVICE); + np->put_tx_ctx->dma = skb_frag_dma_map( + &np->pci_dev->dev, + frag, offset, + bcnt, + PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; np->put_tx_ctx->dma_single = 0; put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); @@ -2257,8 +2260,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, - PCI_DMA_TODEVICE); + np->put_tx_ctx->dma = skb_frag_dma_map( + &np->pci_dev->dev, + frag, offset, + bcnt, + PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; np->put_tx_ctx->dma_single = 0; put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index c85709d6ff1..7efa6242723 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -5,6 +5,7 @@ config PCH_GBE tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)" depends on PCI + select NET_CORE select MII ---help--- This is a gigabit ethernet driver for EG20T PCH. diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index 4add1db20f1..b97132d9dff 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -20,6 +20,7 @@ if NET_PACKET_ENGINE config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" depends on PCI + select NET_CORE select MII ---help--- If you have a Gigabit Ethernet card of this type, say Y and read diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index fad620da7c1..53220958832 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1505,9 +1505,8 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - map[i+1] = pci_map_page(mac->dma_pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); + map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0, + frag->size, PCI_DMA_TODEVICE); map_size[i+1] = frag->size; if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { nfrags = i; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index de18e4753b6..694130ebc75 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1844,8 +1844,8 @@ netxen_map_tx_skb(struct pci_dev *pdev, frag = &skb_shinfo(skb)->frags[i]; nf = &pbuf->frag_array[i+1]; - map = pci_map_page(pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, + PCI_DMA_TODEVICE); if (pci_dma_mapping_error(pdev, map)) goto unwind; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 8cab61c08c8..1871d88ee71 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2388,9 +2388,8 @@ static int ql_send_map(struct ql3_adapter *qdev, seg++; } - map = pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); + map = 
skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size, + PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 690c93f76ae..501e16b9c2a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2135,8 +2135,8 @@ qlcnic_map_tx_skb(struct pci_dev *pdev, frag = &skb_shinfo(skb)->frags[i]; nf = &pbuf->frag_array[i+1]; - map = pci_map_page(pdev, frag->page, frag->page_offset, - frag->size, PCI_DMA_TODEVICE); + map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, + PCI_DMA_TODEVICE); if (pci_dma_mapping_error(pdev, map)) goto unwind; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 39360c48586..ce6c6fee308 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1431,10 +1431,8 @@ static int ql_map_send(struct ql_adapter *qdev, map_idx++; } - map = - pci_map_page(qdev->pdev, frag->page, - frag->page_offset, frag->size, - PCI_DMA_TODEVICE); + map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size, + PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { @@ -1477,8 +1475,6 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, { struct sk_buff *skb; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct skb_frag_struct *rx_frag; - int nr_frags; struct napi_struct *napi = &rx_ring->napi; napi->dev = qdev->ndev; @@ -1492,12 +1488,10 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, return; } prefetch(lbq_desc->p.pg_chunk.va); - rx_frag = skb_shinfo(skb)->frags; - nr_frags = skb_shinfo(skb)->nr_frags; - rx_frag += nr_frags; - rx_frag->page = lbq_desc->p.pg_chunk.page; - rx_frag->page_offset = lbq_desc->p.pg_chunk.offset; - rx_frag->size = length; + __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); skb->len += length; skb->data_len += length; diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig index 2055f7eb2ba..c8ba4b3494c 100644 --- a/drivers/net/ethernet/rdc/Kconfig +++ b/drivers/net/ethernet/rdc/Kconfig @@ -22,6 +22,7 @@ config R6040 tristate "RDC R6040 Fast Ethernet Adapter support" depends on PCI select CRC32 + select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 5d2d1b8678f..c77d5af676a 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -784,8 +784,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, len = this_frag->size; mapping = dma_map_single(&cp->pdev->dev, - ((void *) page_address(this_frag->page) + - this_frag->page_offset), + skb_frag_address(this_frag), len, PCI_DMA_TODEVICE); eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index d8df67ac51b..84083ec6e61 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -37,6 +37,7 @@ config 8139CP tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)" depends on PCI && EXPERIMENTAL select CRC32 + select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -51,6 +52,7 @@ config 8139TOO tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -105,6 +107,7 @@ config R8169 depends on PCI select FW_LOADER select CRC32 + select NET_CORE select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1cf8c3c1328..835bbb534c5 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5027,7 +5027,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, txd = tp->TxDescArray + entry; len = frag->size; - addr = ((void *) page_address(frag->page)) + frag->page_offset; + addr = skb_frag_address(frag); mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { if (net_ratelimit()) diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index f57ae230817..9755b49bbef 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -9,6 +9,7 @@ config SH_ETH CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757) select CRC32 + select NET_CORE select MII select MDIO_BITBANG select PHYLIB diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig index e832f46660c..c1c4bb868a3 100644 --- a/drivers/net/ethernet/sgi/Kconfig +++ b/drivers/net/ethernet/sgi/Kconfig @@ -22,6 +22,7 @@ config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 select CRC32 + select NET_CORE select MII ---help--- If you have a network (Ethernet) card of this type, say Y and read diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig index 68d052b09af..f1135cc1bd4 100644 --- a/drivers/net/ethernet/sis/Kconfig +++ b/drivers/net/ethernet/sis/Kconfig @@ -22,6 +22,7 @@ config SIS900 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -38,6 +39,7 @@ config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index f9619285b5e..1854c88dfb9 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -37,6 +37,7 @@ config SMC9194 config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 + select NET_CORE select MII depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ MN10300 || COLDFIRE) @@ -56,6 +57,7 @@ config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" depends on PCMCIA select CRC32 + select NET_CORE select MII ---help--- Say Y here if you intend to attach an SMC 91Cxx 
compatible PCMCIA @@ -68,6 +70,7 @@ config EPIC100 tristate "SMC EtherPower II" depends on PCI select CRC32 + select NET_CORE select MII ---help--- This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, @@ -78,6 +81,7 @@ config EPIC100 config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 + select NET_CORE select MII depends on (ARM || SUPERH || MN10300) ---help--- @@ -95,6 +99,7 @@ config SMSC911X tristate "SMSC LAN911x/LAN921x families embedded ethernet support" depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) select CRC32 + select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index cda61e37c35..8cd9ddec05a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,6 +1,7 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" depends on HAS_IOMEM + select NET_CORE select MII select PHYLIB select CRC32 @@ -11,6 +12,14 @@ config STMMAC_ETH if STMMAC_ETH +config STMMAC_DEBUG_FS + bool "Enable monitoring via sysFS " + default n + depends on STMMAC_ETH && DEBUG_FS + -- help + The stmmac entry in /sys reports DMA TX/RX rings + or (if supported) the HW cap register. + config STMMAC_DA bool "STMMAC DMA arbitration scheme" default n diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 9691733ddb8..0f23d95746b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,4 +2,5 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ - dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y) + dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ + mmc_core.o $(stmmac-y) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 375ea193e13..22c61b2ebfa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -29,6 +29,7 @@ #endif #include "descs.h" +#include "mmc.h" #undef CHIP_DEBUG_PRINT /* Turn-on extra printk debug for MAC core, dma and descriptors */ @@ -115,6 +116,37 @@ enum tx_dma_irq_status { handle_tx_rx = 3, }; +/* DMA HW capabilities */ +struct dma_features { + unsigned int mbps_10_100; + unsigned int mbps_1000; + unsigned int half_duplex; + unsigned int hash_filter; + unsigned int multi_addr; + unsigned int pcs; + unsigned int sma_mdio; + unsigned int pmt_remote_wake_up; + unsigned int pmt_magic_frame; + unsigned int rmon; + /* IEEE 1588-2002*/ + unsigned int time_stamp; + /* IEEE 1588-2008*/ + unsigned int atime_stamp; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + unsigned int eee; + unsigned int av; + /* TX and RX csum */ + unsigned int tx_coe; + unsigned int rx_coe_type1; + unsigned int rx_coe_type2; + unsigned int rxfifo_over_2048; + /* TX and RX number of channels */ + unsigned int number_rx_channel; + unsigned int number_tx_channel; + /* Alternate (enhanced) DESC mode*/ + unsigned int enh_desc; +}; + /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ #define BUF_SIZE_16KiB 16384 #define BUF_SIZE_8KiB 8192 @@ -130,17 +162,6 @@ enum tx_dma_irq_status { #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ #define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ -/* MAC Management Counters register */ -#define 
MMC_CONTROL 0x00000100 /* MMC Control */ -#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */ -#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */ -#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */ -#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */ - -#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */ -#define MMC_CONTROL_MAX_FRM_SHIFT 3 -#define MMC_CONTROL_MAX_FRAME 0x7FF - struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, @@ -198,6 +219,8 @@ struct stmmac_dma_ops { void (*stop_rx) (void __iomem *ioaddr); int (*dma_interrupt) (void __iomem *ioaddr, struct stmmac_extra_stats *x); + /* If supported then get the optional core features */ + unsigned int (*get_hw_feature) (void __iomem *ioaddr); }; struct stmmac_ops { @@ -240,6 +263,7 @@ struct mac_device_info { const struct stmmac_dma_ops *dma; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; + unsigned int synopsys_uid; }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 0f63b3c83c1..b1c48b97594 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -37,11 +37,6 @@ static void dwmac1000_core_init(void __iomem *ioaddr) value |= GMAC_CORE_INIT; writel(value, ioaddr + GMAC_CONTROL); - /* STBus Bridge Configuration */ - /*writel(0xc5608, ioaddr + 0x00007000);*/ - - /* Freeze MMC counters */ - writel(0x8, ioaddr + GMAC_MMC_CTRL); /* Mask GMAC interrupts */ writel(0x207, ioaddr + GMAC_INT_MASK); @@ -229,10 +224,7 @@ static const struct stmmac_ops dwmac1000_ops = { struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) { struct mac_device_info *mac; - u32 uid = readl(ioaddr + GMAC_VERSION); - - pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n", - ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); + u32 hwid = readl(ioaddr + GMAC_VERSION); mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); if (!mac) @@ -246,6 +238,7 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) mac->link.speed = GMAC_CONTROL_FES; mac->mii.addr = GMAC_MII_ADDR; mac->mii.data = GMAC_MII_DATA; + mac->synopsys_uid = hwid; return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 3dbeea61908..da66ac511c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -118,13 +118,6 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, writel(csr6, ioaddr + DMA_CONTROL); } -/* Not yet implemented --- no RMON module */ -static void dwmac1000_dma_diagnostic_fr(void *data, - struct stmmac_extra_stats *x, void __iomem *ioaddr) -{ - return; -} - static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) { int i; @@ -139,11 +132,15 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) } } +static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) +{ + return readl(ioaddr + DMA_HW_FEATURE); +} + const struct stmmac_dma_ops dwmac1000_dma_ops = { .init = dwmac1000_dma_init, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, - .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = 
dwmac_enable_dma_irq, .disable_dma_irq = dwmac_disable_dma_irq, @@ -152,4 +149,5 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .start_rx = dwmac_dma_start_rx, .stop_rx = dwmac_dma_stop_rx, .dma_interrupt = dwmac_dma_interrupt, + .get_hw_feature = dwmac1000_get_hw_feature, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 743a5801763..138fb8dd1e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -70,17 +70,6 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr) readl(ioaddr + MAC_VLAN1)); pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, readl(ioaddr + MAC_VLAN2)); - pr_info("\n\tMAC management counter registers\n"); - pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", - MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); - pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", - MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); - pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", - MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); - pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", - MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); - pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", - MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); } static void dwmac100_irq_status(void __iomem *ioaddr) @@ -199,6 +188,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) mac->link.speed = 0; mac->mii.addr = MAC_MII_ADDR; mac->mii.data = MAC_MII_DATA; + mac->synopsys_uid = 0; return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index da3f5ccf83d..437edacd602 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -34,6 +34,7 @@ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ +#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ /* DMA Control register defines */ #define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h new file mode 100644 index 00000000000..a38352024cb --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -0,0 +1,131 @@ +/******************************************************************************* + MMC Header file + + Copyright (C) 2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
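The .get_hw_feature callback added above is only wired into the dwmac1000 DMA ops, so a caller has to treat it as optional. A minimal, hypothetical guard — the helper name and the NULL check are assumptions, not part of the patch:

	/* Read the DMA HW feature word only when the core registers the
	 * callback; the dwmac100 ops do not, so treat it as optional. */
	static u32 stmmac_probe_hw_cap(struct stmmac_priv *priv)
	{
		if (!priv->hw->dma->get_hw_feature)
			return 0;	/* older 10/100 cores: no capability register */

		return priv->hw->dma->get_hw_feature(priv->ioaddr);
	}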
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +/* MMC control register */ +/* When set, all counter are reset */ +#define MMC_CNTRL_COUNTER_RESET 0x1 +/* When set, do not roll over zero + * after reaching the max value*/ +#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2 +#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */ +#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the + * current value.*/ +#define MMC_CNTRL_PRESET 0x10 +#define MMC_CNTRL_FULL_HALF_PRESET 0x20 +struct stmmac_counters { + unsigned int mmc_tx_octetcount_gb; + unsigned int mmc_tx_framecount_gb; + unsigned int mmc_tx_broadcastframe_g; + unsigned int mmc_tx_multicastframe_g; + unsigned int mmc_tx_64_octets_gb; + unsigned int mmc_tx_65_to_127_octets_gb; + unsigned int mmc_tx_128_to_255_octets_gb; + unsigned int mmc_tx_256_to_511_octets_gb; + unsigned int mmc_tx_512_to_1023_octets_gb; + unsigned int mmc_tx_1024_to_max_octets_gb; + unsigned int mmc_tx_unicast_gb; + unsigned int mmc_tx_multicast_gb; + unsigned int mmc_tx_broadcast_gb; + unsigned int mmc_tx_underflow_error; + unsigned int mmc_tx_singlecol_g; + unsigned int mmc_tx_multicol_g; + unsigned int mmc_tx_deferred; + unsigned int mmc_tx_latecol; + unsigned int mmc_tx_exesscol; + unsigned int mmc_tx_carrier_error; + unsigned int mmc_tx_octetcount_g; + unsigned int mmc_tx_framecount_g; + unsigned int mmc_tx_excessdef; + unsigned int mmc_tx_pause_frame; + unsigned int mmc_tx_vlan_frame_g; + + /* MMC RX counter registers */ + unsigned int mmc_rx_framecount_gb; + unsigned int mmc_rx_octetcount_gb; + unsigned int mmc_rx_octetcount_g; + unsigned int mmc_rx_broadcastframe_g; + unsigned int mmc_rx_multicastframe_g; + unsigned int mmc_rx_crc_errror; + unsigned int mmc_rx_align_error; + unsigned int mmc_rx_run_error; + unsigned int mmc_rx_jabber_error; + unsigned int mmc_rx_undersize_g; + unsigned int mmc_rx_oversize_g; + unsigned int mmc_rx_64_octets_gb; + unsigned int mmc_rx_65_to_127_octets_gb; + unsigned int mmc_rx_128_to_255_octets_gb; + unsigned int mmc_rx_256_to_511_octets_gb; + unsigned int mmc_rx_512_to_1023_octets_gb; + unsigned int mmc_rx_1024_to_max_octets_gb; + unsigned int mmc_rx_unicast_g; + unsigned int mmc_rx_length_error; + unsigned int mmc_rx_autofrangetype; + unsigned int mmc_rx_pause_frames; + unsigned int mmc_rx_fifo_overflow; + unsigned int mmc_rx_vlan_frames_gb; + unsigned int mmc_rx_watchdog_error; + /* IPC */ + unsigned int mmc_rx_ipc_intr_mask; + unsigned int mmc_rx_ipc_intr; + /* IPv4 */ + unsigned int mmc_rx_ipv4_gd; + unsigned int mmc_rx_ipv4_hderr; + unsigned int mmc_rx_ipv4_nopay; + unsigned int mmc_rx_ipv4_frag; + unsigned int mmc_rx_ipv4_udsbl; + + unsigned int mmc_rx_ipv4_gd_octets; + unsigned int mmc_rx_ipv4_hderr_octets; + unsigned int mmc_rx_ipv4_nopay_octets; + unsigned int mmc_rx_ipv4_frag_octets; + unsigned int mmc_rx_ipv4_udsbl_octets; + + /* IPV6 */ + unsigned int mmc_rx_ipv6_gd_octets; + unsigned int mmc_rx_ipv6_hderr_octets; + unsigned int mmc_rx_ipv6_nopay_octets; + + unsigned int mmc_rx_ipv6_gd; + unsigned int mmc_rx_ipv6_hderr; + unsigned int mmc_rx_ipv6_nopay; + + /* Protocols */ + unsigned int mmc_rx_udp_gd; + unsigned int mmc_rx_udp_err; + unsigned int mmc_rx_tcp_gd; + unsigned int mmc_rx_tcp_err; + unsigned int mmc_rx_icmp_gd; + unsigned int mmc_rx_icmp_err; + + unsigned int mmc_rx_udp_gd_octets; + unsigned int mmc_rx_udp_err_octets; + unsigned int mmc_rx_tcp_gd_octets; + unsigned int mmc_rx_tcp_err_octets; + 
unsigned int mmc_rx_icmp_gd_octets; + unsigned int mmc_rx_icmp_err_octets; +}; + +extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); +extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); +extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c new file mode 100644 index 00000000000..41e6b33e1b0 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -0,0 +1,265 @@ +/******************************************************************************* + DWMAC Management Counters + + Copyright (C) 2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/io.h> +#include "mmc.h" + +/* MAC Management Counters register offset */ + +#define MMC_CNTRL 0x00000100 /* MMC Control */ +#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ +#define MMC_DEFAUL_MASK 0xffffffff + +/* MMC TX counter registers */ + +/* Note: + * _GB register stands for good and bad frames + * _G is for good only. 
+ */ +#define MMC_TX_OCTETCOUNT_GB 0x00000114 +#define MMC_TX_FRAMECOUNT_GB 0x00000118 +#define MMC_TX_BROADCASTFRAME_G 0x0000011c +#define MMC_TX_MULTICASTFRAME_G 0x00000120 +#define MMC_TX_64_OCTETS_GB 0x00000124 +#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 +#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c +#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 +#define MMC_TX_UNICAST_GB 0x0000013c +#define MMC_TX_MULTICAST_GB 0x00000140 +#define MMC_TX_BROADCAST_GB 0x00000144 +#define MMC_TX_UNDERFLOW_ERROR 0x00000148 +#define MMC_TX_SINGLECOL_G 0x0000014c +#define MMC_TX_MULTICOL_G 0x00000150 +#define MMC_TX_DEFERRED 0x00000154 +#define MMC_TX_LATECOL 0x00000158 +#define MMC_TX_EXESSCOL 0x0000015c +#define MMC_TX_CARRIER_ERROR 0x00000160 +#define MMC_TX_OCTETCOUNT_G 0x00000164 +#define MMC_TX_FRAMECOUNT_G 0x00000168 +#define MMC_TX_EXCESSDEF 0x0000016c +#define MMC_TX_PAUSE_FRAME 0x00000170 +#define MMC_TX_VLAN_FRAME_G 0x00000174 + +/* MMC RX counter registers */ +#define MMC_RX_FRAMECOUNT_GB 0x00000180 +#define MMC_RX_OCTETCOUNT_GB 0x00000184 +#define MMC_RX_OCTETCOUNT_G 0x00000188 +#define MMC_RX_BROADCASTFRAME_G 0x0000018c +#define MMC_RX_MULTICASTFRAME_G 0x00000190 +#define MMC_RX_CRC_ERRROR 0x00000194 +#define MMC_RX_ALIGN_ERROR 0x00000198 +#define MMC_RX_RUN_ERROR 0x0000019C +#define MMC_RX_JABBER_ERROR 0x000001A0 +#define MMC_RX_UNDERSIZE_G 0x000001A4 +#define MMC_RX_OVERSIZE_G 0x000001A8 +#define MMC_RX_64_OCTETS_GB 0x000001AC +#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 +#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 +#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc +#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 +#define MMC_RX_UNICAST_G 0x000001c4 +#define MMC_RX_LENGTH_ERROR 0x000001c8 +#define MMC_RX_AUTOFRANGETYPE 0x000001cc +#define MMC_RX_PAUSE_FRAMES 0x000001d0 +#define MMC_RX_FIFO_OVERFLOW 0x000001d4 +#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 +#define MMC_RX_WATCHDOG_ERROR 0x000001dc +/* IPC*/ +#define MMC_RX_IPC_INTR_MASK 0x00000200 +#define MMC_RX_IPC_INTR 0x00000208 +/* IPv4*/ +#define MMC_RX_IPV4_GD 0x00000210 +#define MMC_RX_IPV4_HDERR 0x00000214 +#define MMC_RX_IPV4_NOPAY 0x00000218 +#define MMC_RX_IPV4_FRAG 0x0000021C +#define MMC_RX_IPV4_UDSBL 0x00000220 + +#define MMC_RX_IPV4_GD_OCTETS 0x00000250 +#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 +#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 + +/* IPV6*/ +#define MMC_RX_IPV6_GD_OCTETS 0x00000264 +#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c + +#define MMC_RX_IPV6_GD 0x00000224 +#define MMC_RX_IPV6_HDERR 0x00000228 +#define MMC_RX_IPV6_NOPAY 0x0000022c + +/* Protocols*/ +#define MMC_RX_UDP_GD 0x00000230 +#define MMC_RX_UDP_ERR 0x00000234 +#define MMC_RX_TCP_GD 0x00000238 +#define MMC_RX_TCP_ERR 0x0000023c +#define MMC_RX_ICMP_GD 0x00000240 +#define MMC_RX_ICMP_ERR 0x00000244 + +#define MMC_RX_UDP_GD_OCTETS 0x00000270 +#define MMC_RX_UDP_ERR_OCTETS 0x00000274 +#define MMC_RX_TCP_GD_OCTETS 0x00000278 +#define MMC_RX_TCP_ERR_OCTETS 0x0000027c +#define MMC_RX_ICMP_GD_OCTETS 0x00000280 +#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 + +void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) +{ + u32 value = readl(ioaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, ioaddr + MMC_CNTRL); + + pr_debug("stmmac: MMC ctrl register (offset 0x%x): 
0x%08x\n", + MMC_CNTRL, value); +} + +/* To mask all all interrupts.*/ +void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) +{ + writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. + */ +void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) +{ + mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_65_to_127_octets_gb += + readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); + mmc->mmc_tx_128_to_255_octets_gb += + readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); + mmc->mmc_tx_256_to_511_octets_gb += + readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); + mmc->mmc_tx_512_to_1023_octets_gb += + readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); + mmc->mmc_tx_1024_to_max_octets_gb += + readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); + + /* MMC RX counter registers */ + mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); + mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_65_to_127_octets_gb += + readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); + mmc->mmc_rx_128_to_255_octets_gb += + readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); + mmc->mmc_rx_256_to_511_octets_gb += + readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); + mmc->mmc_rx_512_to_1023_octets_gb += + readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); + mmc->mmc_rx_1024_to_max_octets_gb += + readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + 
mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); + /* IPC */ + mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); + /* IPv4 */ + mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); + + mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_hderr_octets += + readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); + mmc->mmc_rx_ipv4_nopay_octets += + readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); + mmc->mmc_rx_ipv4_udsbl_octets += + readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); + + /* IPV6 */ + mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_hderr_octets += + readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); + mmc->mmc_rx_ipv6_nopay_octets += + readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); + + mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); + + /* Protocols */ + mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); + + mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); + mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index de1929b2641..1434bdb390d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,7 +20,7 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ -#define DRV_MODULE_VERSION "July_2011" +#define DRV_MODULE_VERSION "Aug_2011" #include <linux/stmmac.h> #include "common.h" @@ -72,10 +72,13 @@ struct stmmac_priv { spinlock_t lock; int wolopts; int wolenabled; + int wol_irq; #ifdef CONFIG_STMMAC_TIMER struct stmmac_timer *tm; #endif struct plat_stmmacenet_data *plat; + struct stmmac_counters mmc; + struct dma_features dma_cap; }; extern int stmmac_mdio_unregister(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 7ed8fb6c211..aedff9a90eb 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -46,7 +46,7 @@ struct stmmac_stats { { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ offsetof(struct stmmac_priv, xstats.m)} -static const struct stmmac_stats stmmac_gstrings_stats[] = { +static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(tx_underflow), STMMAC_STAT(tx_carrier), STMMAC_STAT(tx_losscarrier), @@ -91,19 +91,106 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) +/* HW MAC Management counters (if supported) */ +#define STMMAC_MMC_STAT(m) \ + { #m, FIELD_SIZEOF(struct stmmac_counters, m), \ + offsetof(struct stmmac_priv, mmc.m)} + +static const struct stmmac_stats stmmac_gstr_mmc[] = { + STMMAC_MMC_STAT(mmc_tx_octetcount_gb), + STMMAC_MMC_STAT(mmc_tx_framecount_gb), + STMMAC_MMC_STAT(mmc_tx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_tx_multicastframe_g), + STMMAC_MMC_STAT(mmc_tx_64_octets_gb), + STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_tx_unicast_gb), + STMMAC_MMC_STAT(mmc_tx_multicast_gb), + STMMAC_MMC_STAT(mmc_tx_broadcast_gb), + STMMAC_MMC_STAT(mmc_tx_underflow_error), + STMMAC_MMC_STAT(mmc_tx_singlecol_g), + STMMAC_MMC_STAT(mmc_tx_multicol_g), + STMMAC_MMC_STAT(mmc_tx_deferred), + STMMAC_MMC_STAT(mmc_tx_latecol), + STMMAC_MMC_STAT(mmc_tx_exesscol), + STMMAC_MMC_STAT(mmc_tx_carrier_error), + STMMAC_MMC_STAT(mmc_tx_octetcount_g), + STMMAC_MMC_STAT(mmc_tx_framecount_g), + STMMAC_MMC_STAT(mmc_tx_excessdef), + STMMAC_MMC_STAT(mmc_tx_pause_frame), + STMMAC_MMC_STAT(mmc_tx_vlan_frame_g), + STMMAC_MMC_STAT(mmc_rx_framecount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_g), + STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_rx_multicastframe_g), + STMMAC_MMC_STAT(mmc_rx_crc_errror), + STMMAC_MMC_STAT(mmc_rx_align_error), + STMMAC_MMC_STAT(mmc_rx_run_error), + STMMAC_MMC_STAT(mmc_rx_jabber_error), + STMMAC_MMC_STAT(mmc_rx_undersize_g), + STMMAC_MMC_STAT(mmc_rx_oversize_g), + STMMAC_MMC_STAT(mmc_rx_64_octets_gb), + STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_rx_unicast_g), + STMMAC_MMC_STAT(mmc_rx_length_error), + STMMAC_MMC_STAT(mmc_rx_autofrangetype), + STMMAC_MMC_STAT(mmc_rx_pause_frames), + STMMAC_MMC_STAT(mmc_rx_fifo_overflow), + STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb), + STMMAC_MMC_STAT(mmc_rx_watchdog_error), + STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), + STMMAC_MMC_STAT(mmc_rx_ipc_intr), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr), + 
STMMAC_MMC_STAT(mmc_rx_ipv6_nopay), + STMMAC_MMC_STAT(mmc_rx_udp_gd), + STMMAC_MMC_STAT(mmc_rx_udp_err), + STMMAC_MMC_STAT(mmc_rx_tcp_gd), + STMMAC_MMC_STAT(mmc_rx_tcp_err), + STMMAC_MMC_STAT(mmc_rx_icmp_gd), + STMMAC_MMC_STAT(mmc_rx_icmp_err), + STMMAC_MMC_STAT(mmc_rx_udp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_udp_err_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), +}; +#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_gstr_mmc) + static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct stmmac_priv *priv = netdev_priv(dev); - if (!priv->plat->has_gmac) - strcpy(info->driver, MAC100_ETHTOOL_NAME); - else + if (priv->plat->has_gmac) strcpy(info->driver, GMAC_ETHTOOL_NAME); + else + strcpy(info->driver, MAC100_ETHTOOL_NAME); strcpy(info->version, DRV_MODULE_VERSION); info->fw_version[0] = '\0'; - info->n_stats = STMMAC_STATS_LEN; } static int stmmac_ethtool_getsettings(struct net_device *dev, @@ -252,24 +339,44 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { struct stmmac_priv *priv = netdev_priv(dev); - int i; - - /* Update HW stats if supported */ - priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats, - priv->ioaddr); + int i, j = 0; + /* Update the DMA HW counters for dwmac10/100 */ + if (!priv->plat->has_gmac) + priv->hw->dma->dma_diagnostic_fr(&dev->stats, + (void *) &priv->xstats, + priv->ioaddr); + else { + /* If supported, for new GMAC chips expose the MMC counters */ + dwmac_mmc_read(priv->ioaddr, &priv->mmc); + + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstr_mmc[i].stat_offset; + + data[j++] = (stmmac_gstr_mmc[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + } + } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; - data[i] = (stmmac_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
(*(u64 *)p) : (*(u32 *)p); } } static int stmmac_get_sset_count(struct net_device *netdev, int sset) { + struct stmmac_priv *priv = netdev_priv(netdev); + int len; + switch (sset) { case ETH_SS_STATS: - return STMMAC_STATS_LEN; + len = STMMAC_STATS_LEN; + + if (priv->plat->has_gmac) + len += STMMAC_MMC_STATS_LEN; + + return len; default: return -EOPNOTSUPP; } @@ -279,9 +386,16 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; u8 *p = data; + struct stmmac_priv *priv = netdev_priv(dev); switch (stringset) { case ETH_SS_STATS: + if (priv->plat->has_gmac) + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + memcpy(p, stmmac_gstr_mmc[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < STMMAC_STATS_LEN; i++) { memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); @@ -321,10 +435,10 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts) { pr_info("stmmac: wakeup enable\n"); device_set_wakeup_enable(priv->device, 1); - enable_irq_wake(dev->irq); + enable_irq_wake(priv->wol_irq); } else { device_set_wakeup_enable(priv->device, 0); - disable_irq_wake(dev->irq); + disable_irq_wake(priv->wol_irq); } spin_lock_irq(&priv->lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 68fb5b0593a..d0fbc5477d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -48,6 +48,10 @@ #include <linux/slab.h> #include <linux/prefetch.h> #include "stmmac.h" +#ifdef CONFIG_STMMAC_DEBUG_FS +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#endif #define STMMAC_RESOURCE_NAME "stmmaceth" @@ -748,6 +752,77 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) stmmac_tx_err(priv); } +static void stmmac_mmc_setup(struct stmmac_priv *priv) +{ + unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + + /* Do not manage MMC IRQ (FIXME) */ + dwmac_mmc_intr_all_mask(priv->ioaddr); + dwmac_mmc_ctrl(priv->ioaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); +} + +static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) +{ + u32 hwid = priv->hw->synopsys_uid; + + /* Only check valid Synopsys Id because old MAC chips + * have no HW registers where get the ID */ + if (likely(hwid)) { + u32 uid = ((hwid & 0x0000ff00) >> 8); + u32 synid = (hwid & 0x000000ff); + + pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", + uid, synid); + + return synid; + } + return 0; +} + +/* New GMAC chips support a new register to indicate the + * presence of the optional feature/functions. 
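The stmmac_get_hw_features() function that follows decodes each bit of that word into a dma_cap flag. As a hedged illustration of how the parsed flags might later be consumed, a hypothetical reporting helper — only the dma_cap field names come from the patch, the function itself is illustrative:

	static void stmmac_report_dma_cap(struct stmmac_priv *priv)
	{
		/* Example consumers of the parsed capability flags */
		if (priv->dma_cap.rmon)
			pr_info("stmmac: RMON/MMC counters available\n");
		if (priv->dma_cap.pmt_magic_frame)
			pr_info("stmmac: magic-frame wake-up supported\n");
		if (priv->dma_cap.enh_desc)
			pr_info("stmmac: enhanced descriptors supported\n");
	}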
+ */ +static int stmmac_get_hw_features(struct stmmac_priv *priv) +{ + u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); + + if (likely(hw_cap)) { + priv->dma_cap.mbps_10_100 = (hw_cap & 0x1); + priv->dma_cap.mbps_1000 = (hw_cap & 0x2) >> 1; + priv->dma_cap.half_duplex = (hw_cap & 0x4) >> 2; + priv->dma_cap.hash_filter = (hw_cap & 0x10) >> 4; + priv->dma_cap.multi_addr = (hw_cap & 0x20) >> 5; + priv->dma_cap.pcs = (hw_cap & 0x40) >> 6; + priv->dma_cap.sma_mdio = (hw_cap & 0x100) >> 8; + priv->dma_cap.pmt_remote_wake_up = (hw_cap & 0x200) >> 9; + priv->dma_cap.pmt_magic_frame = (hw_cap & 0x400) >> 10; + priv->dma_cap.rmon = (hw_cap & 0x800) >> 11; /* MMC */ + /* IEEE 1588-2002*/ + priv->dma_cap.time_stamp = (hw_cap & 0x1000) >> 12; + /* IEEE 1588-2008*/ + priv->dma_cap.atime_stamp = (hw_cap & 0x2000) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + priv->dma_cap.eee = (hw_cap & 0x4000) >> 14; + priv->dma_cap.av = (hw_cap & 0x8000) >> 15; + /* TX and RX csum */ + priv->dma_cap.tx_coe = (hw_cap & 0x10000) >> 16; + priv->dma_cap.rx_coe_type1 = (hw_cap & 0x20000) >> 17; + priv->dma_cap.rx_coe_type2 = (hw_cap & 0x40000) >> 18; + priv->dma_cap.rxfifo_over_2048 = (hw_cap & 0x80000) >> 19; + /* TX and RX number of channels */ + priv->dma_cap.number_rx_channel = (hw_cap & 0x300000) >> 20; + priv->dma_cap.number_tx_channel = (hw_cap & 0xc00000) >> 22; + /* Alternate (enhanced) DESC mode*/ + priv->dma_cap.enh_desc = (hw_cap & 0x1000000) >> 24; + + } else + pr_debug("\tNo HW DMA feature register supported"); + + return hw_cap; +} + /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. @@ -820,17 +895,16 @@ static int stmmac_open(struct net_device *dev) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->ioaddr); - priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); + stmmac_get_synopsys_id(priv); + + stmmac_get_hw_features(priv); + if (priv->rx_coe) pr_info("stmmac: Rx Checksum Offload Engine supported\n"); if (priv->plat->tx_coe) pr_info("\tTX Checksum insertion supported\n"); netdev_update_features(dev); - /* Initialise the MMC (if present) to disable all interrupts. */ - writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); - writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); - /* Request the IRQ lines */ ret = request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); @@ -850,6 +924,8 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; + stmmac_mmc_setup(priv); + /* Start the ball rolling... 
*/ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); @@ -1416,6 +1492,182 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return ret; } +#ifdef CONFIG_STMMAC_DEBUG_FS +static struct dentry *stmmac_fs_dir; +static struct dentry *stmmac_rings_status; +static struct dentry *stmmac_dma_cap; + +static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) +{ + struct tmp_s { + u64 a; + unsigned int b; + unsigned int c; + }; + int i; + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + + seq_printf(seq, "=======================\n"); + seq_printf(seq, " RX descriptor ring\n"); + seq_printf(seq, "=======================\n"); + + for (i = 0; i < priv->dma_rx_size; i++) { + struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i); + seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", + i, (unsigned int)(x->a), + (unsigned int)((x->a) >> 32), x->b, x->c); + seq_printf(seq, "\n"); + } + + seq_printf(seq, "\n"); + seq_printf(seq, "=======================\n"); + seq_printf(seq, " TX descriptor ring\n"); + seq_printf(seq, "=======================\n"); + + for (i = 0; i < priv->dma_tx_size; i++) { + struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i); + seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", + i, (unsigned int)(x->a), + (unsigned int)((x->a) >> 32), x->b, x->c); + seq_printf(seq, "\n"); + } + + return 0; +} + +static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) +{ + return single_open(file, stmmac_sysfs_ring_read, inode->i_private); +} + +static const struct file_operations stmmac_rings_status_fops = { + .owner = THIS_MODULE, + .open = stmmac_sysfs_ring_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + + if (!stmmac_get_hw_features(priv)) { + seq_printf(seq, "DMA HW features not supported\n"); + return 0; + } + + seq_printf(seq, "==============================\n"); + seq_printf(seq, "\tDMA HW features\n"); + seq_printf(seq, "==============================\n"); + + seq_printf(seq, "\t10/100 Mbps %s\n", + (priv->dma_cap.mbps_10_100) ? "Y" : "N"); + seq_printf(seq, "\t1000 Mbps %s\n", + (priv->dma_cap.mbps_1000) ? "Y" : "N"); + seq_printf(seq, "\tHalf duple %s\n", + (priv->dma_cap.half_duplex) ? "Y" : "N"); + seq_printf(seq, "\tHash Filter: %s\n", + (priv->dma_cap.hash_filter) ? "Y" : "N"); + seq_printf(seq, "\tMultiple MAC address registers: %s\n", + (priv->dma_cap.multi_addr) ? "Y" : "N"); + seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n", + (priv->dma_cap.pcs) ? "Y" : "N"); + seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", + (priv->dma_cap.sma_mdio) ? "Y" : "N"); + seq_printf(seq, "\tPMT Remote wake up: %s\n", + (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); + seq_printf(seq, "\tPMT Magic Frame: %s\n", + (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); + seq_printf(seq, "\tRMON module: %s\n", + (priv->dma_cap.rmon) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", + (priv->dma_cap.time_stamp) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", + (priv->dma_cap.atime_stamp) ? "Y" : "N"); + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", + (priv->dma_cap.eee) ? "Y" : "N"); + seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? 
"Y" : "N"); + seq_printf(seq, "\tChecksum Offload in TX: %s\n", + (priv->dma_cap.tx_coe) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", + (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); + seq_printf(seq, "\tNumber of Additional RX channel: %d\n", + priv->dma_cap.number_rx_channel); + seq_printf(seq, "\tNumber of Additional TX channel: %d\n", + priv->dma_cap.number_tx_channel); + seq_printf(seq, "\tEnhanced descriptors: %s\n", + (priv->dma_cap.enh_desc) ? "Y" : "N"); + + return 0; +} + +static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) +{ + return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); +} + +static const struct file_operations stmmac_dma_cap_fops = { + .owner = THIS_MODULE, + .open = stmmac_sysfs_dma_cap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int stmmac_init_fs(struct net_device *dev) +{ + /* Create debugfs entries */ + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + + if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { + pr_err("ERROR %s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME); + + return -ENOMEM; + } + + /* Entry to report DMA RX/TX rings */ + stmmac_rings_status = debugfs_create_file("descriptors_status", + S_IRUGO, stmmac_fs_dir, dev, + &stmmac_rings_status_fops); + + if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { + pr_info("ERROR creating stmmac ring debugfs file\n"); + debugfs_remove(stmmac_fs_dir); + + return -ENOMEM; + } + + /* Entry to report the DMA HW features */ + stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, + dev, &stmmac_dma_cap_fops); + + if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { + pr_info("ERROR creating stmmac MMC debugfs file\n"); + debugfs_remove(stmmac_rings_status); + debugfs_remove(stmmac_fs_dir); + + return -ENOMEM; + } + + return 0; +} + +static void stmmac_exit_fs(void) +{ + debugfs_remove(stmmac_rings_status); + debugfs_remove(stmmac_dma_cap); + debugfs_remove(stmmac_fs_dir); +} +#endif /* CONFIG_STMMAC_DEBUG_FS */ + static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, .ndo_start_xmit = stmmac_xmit, @@ -1519,7 +1771,7 @@ static int stmmac_mac_device_setup(struct net_device *dev) if (device_can_wakeup(priv->device)) { priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - enable_irq_wake(dev->irq); + enable_irq_wake(priv->wol_irq); } return 0; @@ -1592,6 +1844,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev) pr_info("\tPMT module supported\n"); device_set_wakeup_capable(&pdev->dev, 1); } + /* + * On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (priv->wol_irq == -ENXIO) + priv->wol_irq = ndev->irq; + platform_set_drvdata(pdev, ndev); @@ -1630,6 +1894,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev) if (ret < 0) goto out_unregister; pr_debug("registered!\n"); + +#ifdef CONFIG_STMMAC_DEBUG_FS + ret = stmmac_init_fs(ndev); + if (ret < 0) + pr_warning("\tFailed debugFS registration"); +#endif + return 0; out_unregister: @@ -1682,6 +1953,10 @@ static int stmmac_dvr_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); +#ifdef CONFIG_STMMAC_DEBUG_FS + stmmac_exit_fs(); +#endif + free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 1776a37b7ae..f07a72150c6 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2048,8 +2048,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, skb->truesize += hlen - swivel; skb->len += hlen - swivel; - get_page(page->buffer); - frag->page = page->buffer; + __skb_frag_set_page(frag, page->buffer); + __skb_frag_ref(frag); frag->page_offset = off; frag->size = hlen - swivel; @@ -2072,8 +2072,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, skb->len += hlen; frag++; - get_page(page->buffer); - frag->page = page->buffer; + __skb_frag_set_page(frag, page->buffer); + __skb_frag_ref(frag); frag->page_offset = 0; frag->size = hlen; RX_USED_ADD(page, hlen + cp->crc_size); @@ -2830,9 +2830,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; len = fragp->size; - mapping = pci_map_page(cp->pdev, fragp->page, - fragp->page_offset, len, - PCI_DMA_TODEVICE); + mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, + PCI_DMA_TODEVICE); tabort = cas_calc_tabort(cp, fragp->page_offset, len); if (unlikely(tabort)) { @@ -2843,7 +2842,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, ctrl, 0); entry = TX_DESC_NEXT(ring, entry); - addr = cas_page_map(fragp->page); + addr = cas_page_map(skb_frag_page(fragp)); memcpy(tx_tiny_buf(cp, ring, entry), addr + fragp->page_offset + len - tabort, tabort); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 3c9ef1c196a..cad58f26c47 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3290,11 +3290,8 @@ static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, u32 offset, u32 size) { int i = skb_shinfo(skb)->nr_frags; - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - frag->page = page; - frag->page_offset = offset; - frag->size = size; + __skb_fill_page_desc(skb, i, page, offset, size); skb->len += size; skb->data_len += size; @@ -6737,7 +6734,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = frag->size; - mapping = np->ops->map_page(np->device, frag->page, + mapping = np->ops->map_page(np->device, skb_frag_page(frag), frag->page_offset, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index e5d82a53ea5..68a9ba66feb 
100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -22,6 +22,7 @@ config VIA_RHINE tristate "VIA Rhine support" depends on PCI select CRC32 + select NET_CORE select MII ---help--- If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A), @@ -47,6 +48,7 @@ config VIA_VELOCITY depends on PCI select CRC32 select CRC_CCITT + select NET_CORE select MII ---help--- If you have a VIA "Velocity" based network card say Y here. diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 82660672dcd..d275e276e74 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -2,7 +2,7 @@ * SuperH IrDA Driver * * Copyright (C) 2010 Renesas Solutions Corp. - * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * Based on sh_sir.c * Copyright (C) 2009 Renesas Solutions Corp. @@ -26,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/clk.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> @@ -144,8 +145,8 @@ struct sh_irda_xir_func { struct sh_irda_self { void __iomem *membase; - unsigned int irq; - struct clk *clk; + unsigned int irq; + struct platform_device *pdev; struct net_device *ndev; @@ -264,7 +265,7 @@ static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate) return 0; } -static int xir_get_rcv_length(struct sh_irda_self *self) +static int sh_irda_get_rcv_length(struct sh_irda_self *self) { return RFL_MASK & sh_irda_read(self, IRRFLR); } @@ -274,47 +275,47 @@ static int xir_get_rcv_length(struct sh_irda_self *self) * NONE MODE * *=====================================*/ -static int xir_fre(struct sh_irda_self *self) +static int sh_irda_xir_fre(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; dev_err(dev, "none mode: frame recv\n"); return 0; } -static int xir_trov(struct sh_irda_self *self) +static int sh_irda_xir_trov(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; dev_err(dev, "none mode: buffer ram over\n"); return 0; } -static int xir_9(struct sh_irda_self *self) +static int sh_irda_xir_9(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; dev_err(dev, "none mode: time over\n"); return 0; } -static int xir_8(struct sh_irda_self *self) +static int sh_irda_xir_8(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; dev_err(dev, "none mode: framing error\n"); return 0; } -static int xir_fte(struct sh_irda_self *self) +static int sh_irda_xir_fte(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; dev_err(dev, "none mode: frame transmit end\n"); return 0; } -static struct sh_irda_xir_func xir_func = { - .xir_fre = xir_fre, - .xir_trov = xir_trov, - .xir_9 = xir_9, - .xir_8 = xir_8, - .xir_fte = xir_fte, +static struct sh_irda_xir_func sh_irda_xir_func = { + .xir_fre = sh_irda_xir_fre, + .xir_trov = sh_irda_xir_trov, + .xir_9 = sh_irda_xir_9, + .xir_8 = sh_irda_xir_8, + .xir_fte = sh_irda_xir_fte, }; /*===================================== @@ -323,12 +324,12 @@ static struct sh_irda_xir_func xir_func = { * * MIR/FIR are not supported now *=====================================*/ -static struct sh_irda_xir_func mfir_func = { - .xir_fre = xir_fre, - .xir_trov = xir_trov, - .xir_9 = xir_9, - .xir_8 = xir_8, - .xir_fte = xir_fte, +static struct sh_irda_xir_func sh_irda_mfir_func = { + .xir_fre = sh_irda_xir_fre, + .xir_trov = sh_irda_xir_trov, + .xir_9 = sh_irda_xir_9, 
+ .xir_8 = sh_irda_xir_8, + .xir_fte = sh_irda_xir_fte, }; /*===================================== @@ -336,12 +337,12 @@ static struct sh_irda_xir_func mfir_func = { * SIR MODE * *=====================================*/ -static int sir_fre(struct sh_irda_self *self) +static int sh_irda_sir_fre(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; u16 data16; u8 *data = (u8 *)&data16; - int len = xir_get_rcv_length(self); + int len = sh_irda_get_rcv_length(self); int i, j; if (len > IRDARAM_LEN) @@ -364,7 +365,7 @@ static int sir_fre(struct sh_irda_self *self) return 0; } -static int sir_trov(struct sh_irda_self *self) +static int sh_irda_sir_trov(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; @@ -373,7 +374,7 @@ static int sir_trov(struct sh_irda_self *self) return 0; } -static int sir_tot(struct sh_irda_self *self) +static int sh_irda_sir_tot(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; @@ -383,7 +384,7 @@ static int sir_tot(struct sh_irda_self *self) return 0; } -static int sir_fer(struct sh_irda_self *self) +static int sh_irda_sir_fer(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; @@ -392,7 +393,7 @@ static int sir_fer(struct sh_irda_self *self) return 0; } -static int sir_fte(struct sh_irda_self *self) +static int sh_irda_sir_fte(struct sh_irda_self *self) { struct device *dev = &self->ndev->dev; @@ -402,12 +403,12 @@ static int sir_fte(struct sh_irda_self *self) return 0; } -static struct sh_irda_xir_func sir_func = { - .xir_fre = sir_fre, - .xir_trov = sir_trov, - .xir_9 = sir_tot, - .xir_8 = sir_fer, - .xir_fte = sir_fte, +static struct sh_irda_xir_func sh_irda_sir_func = { + .xir_fre = sh_irda_sir_fre, + .xir_trov = sh_irda_sir_trov, + .xir_9 = sh_irda_sir_tot, + .xir_8 = sh_irda_sir_fer, + .xir_fte = sh_irda_sir_fte, }; static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode) @@ -421,22 +422,22 @@ static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode) case SH_IRDA_SIR: name = "SIR"; data = TMD_SIR; - func = &sir_func; + func = &sh_irda_sir_func; break; case SH_IRDA_MIR: name = "MIR"; data = TMD_MIR; - func = &mfir_func; + func = &sh_irda_mfir_func; break; case SH_IRDA_FIR: name = "FIR"; data = TMD_FIR; - func = &mfir_func; + func = &sh_irda_mfir_func; break; default: - name = "NONE"; - data = 0; - func = &xir_func; + name = "NONE"; + data = 0; + func = &sh_irda_xir_func; break; } @@ -694,7 +695,7 @@ static int sh_irda_open(struct net_device *ndev) struct sh_irda_self *self = netdev_priv(ndev); int err; - clk_enable(self->clk); + pm_runtime_get_sync(&self->pdev->dev); err = sh_irda_crc_init(self); if (err) goto open_err; @@ -718,7 +719,7 @@ static int sh_irda_open(struct net_device *ndev) return 0; open_err: - clk_disable(self->clk); + pm_runtime_put_sync(&self->pdev->dev); return err; } @@ -734,6 +735,7 @@ static int sh_irda_stop(struct net_device *ndev) } netif_stop_queue(ndev); + pm_runtime_put_sync(&self->pdev->dev); dev_info(&ndev->dev, "stoped\n"); @@ -786,11 +788,8 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) if (err) goto err_mem_2; - self->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(self->clk)) { - dev_err(&pdev->dev, "cannot get irda clock\n"); - goto err_mem_3; - } + self->pdev = pdev; + pm_runtime_enable(&pdev->dev); irda_init_max_qos_capabilies(&self->qos); @@ -820,8 +819,7 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) goto exit; err_mem_4: - clk_put(self->clk); -err_mem_3: + 
pm_runtime_disable(&pdev->dev); sh_irda_remove_iobuf(self); err_mem_2: iounmap(self->membase); @@ -840,7 +838,7 @@ static int __devexit sh_irda_remove(struct platform_device *pdev) return 0; unregister_netdev(ndev); - clk_put(self->clk); + pm_runtime_disable(&pdev->dev); sh_irda_remove_iobuf(self); iounmap(self->membase); free_netdev(ndev); @@ -849,11 +847,29 @@ static int __devexit sh_irda_remove(struct platform_device *pdev) return 0; } +static int sh_irda_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. + */ + return 0; +} + +static const struct dev_pm_ops sh_irda_pm_ops = { + .runtime_suspend = sh_irda_runtime_nop, + .runtime_resume = sh_irda_runtime_nop, +}; + static struct platform_driver sh_irda_driver = { - .probe = sh_irda_probe, - .remove = __devexit_p(sh_irda_remove), - .driver = { - .name = DRIVER_NAME, + .probe = sh_irda_probe, + .remove = __devexit_p(sh_irda_remove), + .driver = { + .name = DRIVER_NAME, + .pm = &sh_irda_pm_ops, }, }; @@ -870,6 +886,6 @@ static void __exit sh_irda_exit(void) module_init(sh_irda_init); module_exit(sh_irda_exit); -MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>"); +MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); MODULE_DESCRIPTION("SuperH IrDA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 836e13fcb3e..b100c90e850 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -543,7 +543,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); - return dev_ethtool_get_settings(vlan->lowerdev, cmd); + + return __ethtool_get_settings(vlan->lowerdev, cmd); } static const struct ethtool_ops macvlan_ethtool_ops = { diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index ab96c319a24..7c3f84acfdf 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -503,10 +503,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, skb->truesize += len; atomic_add(len, &skb->sk->sk_wmem_alloc); while (len) { - f = &skb_shinfo(skb)->frags[i]; - f->page = page[i]; - f->page_offset = base & ~PAGE_MASK; - f->size = min_t(int, len, PAGE_SIZE - f->page_offset); + __skb_fill_page_desc( + skb, i, page[i], + base & ~PAGE_MASK, + min_t(int, len, PAGE_SIZE - f->page_offset)); skb_shinfo(skb)->nr_frags++; /* increase sk_wmem_alloc */ base += f->size; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 84d4608153c..23357612793 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -68,6 +68,7 @@ config USB_KAWETH config USB_PEGASUS tristate "USB Pegasus/Pegasus-II based ethernet device support" + select NET_CORE select MII ---help--- Say Y here if you know you have Pegasus or Pegasus-II based adapter. @@ -84,6 +85,7 @@ config USB_PEGASUS config USB_RTL8150 tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)" depends on EXPERIMENTAL + select NET_CORE select MII help Say Y here if you have RTL8150 based usb-ethernet adapter. @@ -95,6 +97,7 @@ config USB_RTL8150 config USB_USBNET tristate "Multi-purpose USB Networking Framework" + select NET_CORE select MII ---help--- This driver supports several kinds of network links over USB, |
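A pattern that recurs through the hunks above is the move away from open-coded skb_frag_t field accesses (frag->page, frag->page_offset) to the fragment helpers. A minimal sketch of the TX-side shape, assuming a generic struct device and a caller-provided address array — this is not any specific driver's code, only the helper calls mirror the patch:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* Map every fragment of an skb for transmit DMA via skb_frag_dma_map(),
	 * the replacement for pci_map_page(frag->page, frag->page_offset, ...). */
	static int example_map_frags(struct device *dev, struct sk_buff *skb,
				     dma_addr_t *addrs)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			addrs[i] = skb_frag_dma_map(dev, frag, 0, frag->size,
						    DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addrs[i]))
				return -ENOMEM;	/* caller unwinds earlier mappings */
		}
		return 0;
	}

On the receive side the same series replaces direct writes to frag->page, frag->page_offset and frag->size with __skb_fill_page_desc(), as in the qlge, niu and macvtap hunks above.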