Diffstat (limited to 'drivers/net/ethernet')
91 files changed, 1087 insertions, 705 deletions
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 1b78ca7a978..a5f91e1e8fe 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -5,10 +5,7 @@ config NET_VENDOR_8390 bool "National Semi-conductor 8390 devices" default y - depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \ - ISA || MAC || M32R || MACH_TX49XX || \ - H8300 || ARM || MIPS || ZORRO || PCMCIA || \ - EXPERIMENTAL) + depends on NET_VENDOR_NATSEMI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 1ed886d421f..36d6abd1cff 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -44,8 +44,8 @@ config ATL1 will be called atl1. config ATL1E - tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "Atheros L1E Gigabit Ethernet support" + depends on PCI select CRC32 select NET_CORE select MII @@ -56,8 +56,8 @@ config ATL1E will be called atl1e. config ATL1C - tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "Atheros L1C Gigabit Ethernet support" + depends on PCI select CRC32 select NET_CORE select MII diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a7efec29303..9b017d9c58e 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -381,7 +381,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) } #ifdef CONFIG_BCM47XX -#include <asm/mach-bcm47xx/nvram.h> +#include <bcm47xx_nvram.h> static void b44_wap54g10_workaround(struct b44 *bp) { char buf[20]; @@ -393,7 +393,7 @@ static void b44_wap54g10_workaround(struct b44 *bp) * see https://dev.openwrt.org/ticket/146 * check and reset bit "isolate" */ - if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) + if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0) return; if (simple_strtoul(buf, NULL, 0) == 2) { err = __b44_readphy(bp, 0, MII_BMCR, &val); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 3fd32880e52..da5f4397f87 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -15,7 +15,7 @@ #include <linux/mii.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> -#include <asm/mach-bcm47xx/nvram.h> +#include <bcm47xx_nvram.h> static const struct bcma_device_id bgmac_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), @@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", ring->start); } else { + /* Omit CRC. 
*/ + len -= ETH_FCS_LEN; + new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); if (new_skb) { skb_put(new_skb, len); skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, new_skb->data, len); + skb_checksum_none_assert(skb); new_skb->protocol = eth_type_trans(new_skb, bgmac->net_dev); netif_receive_skb(new_skb); @@ -436,6 +440,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) } for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + int j; + ring = &bgmac->rx_ring[i]; ring->num_slots = BGMAC_RX_RING_SLOTS; ring->mmio_base = ring_base[i]; @@ -458,8 +464,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); /* Alloc RX slots */ - for (i = 0; i < ring->num_slots; i++) { - err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]); + for (j = 0; j < ring->num_slots; j++) { + err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); if (err) { bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n"); goto err_dma_free; @@ -496,6 +502,8 @@ static void bgmac_dma_init(struct bgmac *bgmac) } for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + int j; + ring = &bgmac->rx_ring[i]; /* We don't implement unaligned addressing, so enable first */ @@ -505,11 +513,11 @@ static void bgmac_dma_init(struct bgmac *bgmac) bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, upper_32_bits(ring->dma_base)); - for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots; - i++, dma_desc++) { + for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; + j++, dma_desc++) { ctl0 = ctl1 = 0; - if (i == ring->num_slots - 1) + if (j == ring->num_slots - 1) ctl0 |= BGMAC_DESC_CTL0_EOT; ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; /* Is there any BGMAC device that requires extension? */ @@ -517,8 +525,8 @@ static void bgmac_dma_init(struct bgmac *bgmac) * B43_DMA64_DCTL1_ADDREXT_MASK; */ - dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr)); - dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr)); + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr)); dma_desc->ctl0 = cpu_to_le32(ctl0); dma_desc->ctl1 = cpu_to_le32(ctl1); } @@ -904,7 +912,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac) BGMAC_CHIPCTL_1_IF_TYPE_RMII; char buf[2]; - if (nvram_getenv("et_swtype", buf, 1) > 0) { + if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) { if (kstrtou8(buf, 0, &et_swtype)) bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", buf); @@ -1382,7 +1390,7 @@ static int bgmac_probe(struct bcma_device *core) } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; - if (nvram_getenv("et0_no_txint", NULL, 0) == 0) + if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; /* TODO: reset the external phy. 
Specs are needed */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ecac04a3687..4046f97378c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2760,6 +2760,7 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); + bnx2x_del_all_napi(bp); /* clear pf_load status, as it was already set */ if (IS_PF(bp)) @@ -3142,7 +3143,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) tsum = ~csum_fold(csum_add((__force __wsum) csum, csum_partial(t_header, -fix, 0))); - return bswab16(csum); + return bswab16(tsum); } static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 568205436a1..91ecd6a00d0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) break; default: BNX2X_ERR("Non valid capability ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); @@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) break; default: BNX2X_ERR("Non valid TC-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } return rval; @@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) return -EINVAL; } -static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); @@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, break; default: BNX2X_ERR("Non valid featrue-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "DCB disabled\n"); - rval = -EINVAL; + rval = 1; } return rval; @@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, break; default: BNX2X_ERR("Non valid featrue-ID\n"); - rval = -EINVAL; + rval = 1; break; } } else { DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); - rval = -EINVAL; + rval = 1; } return rval; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 9a674b14b40..edfa67adf2f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->lp_advertising |= ADVERTISED_2500baseX_Full; if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) cmd->lp_advertising |= ADVERTISED_10000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; } cmd->maxtxpkt = 0; @@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ADVERTISED_10000baseKR_Full)) bp->link_params.speed_cap_mask[cfg_idx] |= PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; + + if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + 
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; } } else { /* forced speed */ /* advertise the requested speed and duplex if supported */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index c6da77fa9d0..77ebae0ac64 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC6, &rx_tx_in_reset); - if (!rx_tx_in_reset) { + if ((!rx_tx_in_reset) && + (params->link_flags & + PHY_INITIALIZED)) { bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_sfi(phy, params); bnx2x_warpcore_reset_lane(bp, phy, 0); @@ -10422,6 +10424,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant off. + */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x0); + } } break; case LED_MODE_ON: @@ -10468,6 +10492,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x20); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant on. + */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x20); + } } break; @@ -10532,6 +10578,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Restore LED4 source to external link, + * and re-enable interrupts. 
+ */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x40); + if (params->link_flags & + LINK_FLAGS_INT_DISABLED) { + bnx2x_link_int_enable(params); + params->link_flags &= + ~LINK_FLAGS_INT_DISABLED; + } + } } break; } @@ -11791,6 +11853,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_KR; phy->flags |= FLAGS_WC_DUAL_MODE; phy->supported &= (SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE | SUPPORTED_Pause | @@ -12465,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = PHY_INITIALIZED; /* Driver opens NIG-BRB filters */ bnx2x_set_rx_filter(params, 1); /* Check if link flap can be avoided */ @@ -12629,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params, struct bnx2x *bp = params->bp; vars->link_up = 0; vars->phy_flags = 0; + params->link_flags &= ~PHY_INITIALIZED; if (!params->lfa_base) return bnx2x_link_reset(params, vars, 1); /* @@ -13013,64 +13080,6 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp, return 0; } -static int bnx2x_84833_pre_init_phy(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 port) -{ - u16 val, cnt; - /* Wait for FW completing its initialization. */ - for (cnt = 0; cnt < 1500; cnt++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, &val); - if (!(val & (1<<15))) - break; - usleep_range(1000, 2000); - } - if (cnt >= 1500) { - DP(NETIF_MSG_LINK, "84833 reset timeout\n"); - return -EINVAL; - } - - /* Put the port in super isolate mode. */ - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val |= MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, bp, port); - return 0; -} - -int bnx2x_pre_init_phy(struct bnx2x *bp, - u32 shmem_base, - u32 shmem2_base, - u32 chip_id, - u8 port) -{ - int rc = 0; - struct bnx2x_phy phy; - if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base, - port, &phy) != 0) { - DP(NETIF_MSG_LINK, "populate_phy failed\n"); - return -EINVAL; - } - bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl); - switch (phy.type) { - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: - case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: - rc = bnx2x_84833_pre_init_phy(bp, &phy, port); - break; - default: - break; - } - return rc; -} - static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 ext_phy_type, u32 chip_id) @@ -13407,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params, vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; bnx2x_update_link_attr(params, vars->link_attr_sync); + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; /* Restart AN on leading lane */ bnx2x_warpcore_restart_AN_KR(phy, params); } @@ -13435,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params, return; } + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * since some switches tend to reinit the AN process and clear the + * advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } lane = 
bnx2x_get_warpcore_lane(phy, params); CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, lane); @@ -13495,7 +13514,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars) struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); if ((phy->supported & SUPPORTED_20000baseKR2_Full) && - (phy->speed_cap_mask & SPEED_20000)) + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) bnx2x_check_kr2_wa(params, vars, phy); bnx2x_check_over_curr(params, vars); if (vars->rx_tx_asic_rst) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index d25c7d79787..56c2aae4e2c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -307,7 +307,9 @@ struct link_params { struct bnx2x *bp; u16 req_fc_auto_adv; /* Should be set to TX / BOTH when req_flow_ctrl is set to AUTO */ - u16 rsrv1; + u16 link_flags; +#define LINK_FLAGS_INT_DISABLED (1<<0) +#define PHY_INITIALIZED (1<<1) u32 lfa_base; }; @@ -341,7 +343,8 @@ struct link_vars { u32 link_status; u32 eee_status; u8 fault_detected; - u8 rsrv1; + u8 check_kr2_recovery_cnt; +#define CHECK_KR2_RECOVERY_CNT 5 u16 periodic_flags; #define PERIODIC_FLAGS_LINK_EVENT 0x0001 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c4daee1b728..e81a747ea8c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8843,7 +8843,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { - /* Prevent incomming interrupts in IGU */ + /* Prevent incoming interrupts in IGU */ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 364e37ecbc5..198f6f1c9ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old { #define UPDATE_QSTAT(s, t) \ do { \ - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ + + ((qstats->t##_lo < qstats_old->t##_lo) ? 
1 : 0); \ } while (0) #define UPDATE_QSTAT_OLD(f) \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 36246129864..531eebf40d6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -98,7 +98,7 @@ static inline int bnx2x_pfvf_status_codes(int rc) } } -int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) +static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) { struct cstorm_vf_zone_data __iomem *zone_data = REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); @@ -141,7 +141,7 @@ int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) return 0; } -int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) +static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) { u32 me_reg; int tout = 10, interval = 100; /* Wait for 1 sec */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index fdb9b565541..17a972734ba 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp) tg3_ump_link_report(tp); } + + tp->link_up = netif_carrier_ok(tp->dev); } static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) @@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) return err; } -static void tg3_carrier_on(struct tg3 *tp) -{ - netif_carrier_on(tp->dev); - tp->link_up = true; -} - static void tg3_carrier_off(struct tg3 *tp) { netif_carrier_off(tp->dev); @@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp) return -EBUSY; if (netif_running(tp->dev) && tp->link_up) { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); tg3_link_report(tp); } @@ -4134,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp) tp->link_config.active_speed = tp->link_config.speed; tp->link_config.active_duplex = tp->link_config.duplex; + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + /* With autoneg disabled, 5715 only links up when the + * advertisement register has the configured speed + * enabled. 
+ */ + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); + } + bmcr = 0; switch (tp->link_config.speed) { default: @@ -4262,9 +4266,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) { if (curr_link_up != tp->link_up) { if (curr_link_up) { - tg3_carrier_on(tp); + netif_carrier_on(tp->dev); } else { - tg3_carrier_off(tp); + netif_carrier_off(tp->dev); if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } @@ -14600,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp) if (j + len > block_end) goto partno; - memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + if (len >= sizeof(tp->fw_ver)) + len = sizeof(tp->fw_ver) - 1; + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, + &vpd_data[j]); } partno: diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index ceb0de0cf62..1194446f859 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,6 +22,7 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" + depends on GENERIC_HARDIRQS select NET_CORE select MACB ---help--- diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c6c05bfef0e..e707e31abd8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2347,7 +2347,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = *ppos; - loff_t avail = file->f_path.dentry->d_inode->i_size; + loff_t avail = file_inode(file)->i_size; unsigned int mem = (uintptr_t)file->private_data & 3; struct adapter *adap = file->private_data - mem; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4ce62031f62..8049268ce0f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_BASE 0 #define VPD_LEN 512 +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { u32 cclk_param, cclk_val; - int i, ret; + int i, ret, addr; int ec, sn; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); + if (ret < 0) + goto out; + addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; + + ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); if (ret < 0) goto out; diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 1203be0436e..1df33c799c0 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -57,8 +57,8 @@ config TULIP be called tulip. config TULIP_MWI - bool "New bus configuration (EXPERIMENTAL)" - depends on TULIP && EXPERIMENTAL + bool "New bus configuration" + depends on TULIP ---help--- This configures your Tulip card specifically for the card and system cache line size type you are using. 
@@ -108,6 +108,7 @@ config TULIP_DM910X config DE4X5 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" depends on (PCI || EISA) + depends on VIRT_TO_BUS || ALPHA || PPC || SPARC select CRC32 ---help--- This is support for the DIGITAL series of PCI/EISA Ethernet cards. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 28ceb841418..29aff55f2ee 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -349,6 +349,7 @@ struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; + u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 071aea79d21..3c9b4f12e3e 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter) return 0; } -static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) +static u16 be_POST_stage_get(struct be_adapter *adapter) { u32 sem; - u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH : - SLIPORT_SEMAPHORE_OFFSET_BE; - pci_read_config_dword(adapter->pdev, reg, &sem); - *stage = sem & POST_STAGE_MASK; - - if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK) - return -1; + if (BEx_chip(adapter)) + sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); else - return 0; + pci_read_config_dword(adapter->pdev, + SLIPORT_SEMAPHORE_OFFSET_SH, &sem); + + return sem & POST_STAGE_MASK; } int lancer_wait_ready(struct be_adapter *adapter) @@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter) } do { - status = be_POST_stage_get(adapter, &stage); - if (status) { - dev_err(dev, "POST error; stage=0x%x\n", stage); - return -1; - } else if (stage != POST_STAGE_ARMFW_RDY) { - if (msleep_interruptible(2000)) { - dev_err(dev, "Waiting for POST aborted\n"); - return -EINTR; - } - timeout += 2; - } else { + stage = be_POST_stage_get(adapter); + if (stage == POST_STAGE_ARMFW_RDY) return 0; + + dev_info(dev, "Waiting for POST, %ds elapsed\n", + timeout); + if (msleep_interruptible(2000)) { + dev_err(dev, "Waiting for POST aborted\n"); + return -EINTR; } + timeout += 2; } while (timeout < 60); dev_err(dev, "POST timeout; stage=0x%x\n", stage); diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 541d4530d5b..62dc220695f 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -32,8 +32,8 @@ #define MPU_EP_CONTROL 0 /********** MPU semphore: used for SH & BE *************/ -#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c -#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 +#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ #define POST_STAGE_MASK 0x0000FFFF #define POST_ERR_MASK 0x1 #define POST_ERR_SHIFT 31 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3860888ac71..08e54f3d288 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev) static void be_unmap_pci_bars(struct be_adapter *adapter) { + if (adapter->csr) + pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) 
pci_iounmap(adapter->pdev, adapter->db); } @@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter) adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> SLI_INTF_IF_TYPE_SHIFT; + if (BEx_chip(adapter) && be_physfn(adapter)) { + adapter->csr = pci_iomap(adapter->pdev, 2, 0); + if (adapter->csr == NULL) + return -ENOMEM; + } + addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); if (addr == NULL) goto pci_map_err; @@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) pci_restore_state(pdev); /* Check if card is ok and fw is ready */ + dev_info(&adapter->pdev->dev, + "Waiting for FW to be ready after EEH reset\n"); status = be_fw_wait_ready(adapter); if (status) return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 29d82cf1528..f292c3aa423 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct bufdesc *bdp; void *bufaddr; unsigned short status; - unsigned long flags; + unsigned int index; if (!fep->link) { /* Link is down or autonegotiation is in progress. */ return NETDEV_TX_BUSY; } - spin_lock_irqsave(&fep->hw_lock, flags); /* Fill in a Tx ring entry */ bdp = fep->cur_tx; @@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * This should not happen, since ndev->tbusy should be set. */ printk("%s: tx queue full!.\n", ndev->name); - spin_unlock_irqrestore(&fep->hw_lock, flags); return NETDEV_TX_BUSY; } @@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * 4-byte boundaries. Use bounce buffers to copy data * and get it aligned. Ugh. */ + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->tx_bd_base; + else + index = bdp - fep->tx_bd_base; + if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { - unsigned int index; - if (fep->bufdesc_ex) - index = (struct bufdesc_ex *)bdp - - (struct bufdesc_ex *)fep->tx_bd_base; - else - index = bdp - fep->tx_bd_base; memcpy(fep->tx_bounce[index], skb->data, skb->len); bufaddr = fep->tx_bounce[index]; } @@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) swap_buffer(bufaddr, skb->len); /* Save skb pointer */ - fep->tx_skbuff[fep->skb_cur] = skb; - - ndev->stats.tx_bytes += skb->len; - fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; + fep->tx_skbuff[index] = skb; /* Push the data cache so the CPM does not get stale memory * data. @@ -331,29 +326,72 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } } - /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); - /* If this was the last BD in the ring, start at the beginning again. */ if (status & BD_ENET_TX_WRAP) bdp = fep->tx_bd_base; else bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - if (bdp == fep->dirty_tx) { - fep->tx_full = 1; + fep->cur_tx = bdp; + + if (fep->cur_tx == fep->dirty_tx) netif_stop_queue(ndev); - } - fep->cur_tx = bdp; + /* Trigger transmission start */ + writel(0, fep->hwp + FEC_X_DES_ACTIVE); skb_tx_timestamp(skb); - spin_unlock_irqrestore(&fep->hw_lock, flags); - return NETDEV_TX_OK; } +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct bufdesc *bdp; + unsigned int i; + + /* Initialize the receive buffer descriptors. 
*/ + bdp = fep->rx_bd_base; + for (i = 0; i < RX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + + fep->cur_rx = fep->rx_bd_base; + + /* ...and the same for transmit */ + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + for (i = 0; i < TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. */ + bdp->cbd_sc = 0; + if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp->cbd_sc |= BD_SC_WRAP; + fep->dirty_tx = bdp; +} + /* This function is called to start or restart the FEC during a link * change. This only happens when switching between half and full * duplex. @@ -397,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); + fec_enet_bd_init(ndev); + /* Set receive and transmit descriptor base. */ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) @@ -406,11 +446,7 @@ fec_restart(struct net_device *ndev, int duplex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; - fep->cur_rx = fep->rx_bd_base; - /* Reset SKB transmit buffers. */ - fep->skb_cur = fep->skb_dirty = 0; for (i = 0; i <= TX_RING_MOD_MASK; i++) { if (fep->tx_skbuff[i]) { dev_kfree_skb_any(fep->tx_skbuff[i]); @@ -573,20 +609,35 @@ fec_enet_tx(struct net_device *ndev) struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; + int index = 0; fep = netdev_priv(ndev); - spin_lock(&fep->hw_lock); bdp = fep->dirty_tx; + /* get next bdp of dirty_tx */ + if (bdp->cbd_sc & BD_ENET_TX_WRAP) + bdp = fep->tx_bd_base; + else + bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { - if (bdp == fep->cur_tx && fep->tx_full == 0) + + /* current queue is empty */ + if (bdp == fep->cur_tx) break; + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->tx_bd_base; + else + index = bdp - fep->tx_bd_base; + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; - skb = fep->tx_skbuff[fep->skb_dirty]; + skb = fep->tx_skbuff[index]; + /* Check for errors. 
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | @@ -631,8 +682,9 @@ fec_enet_tx(struct net_device *ndev) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - fep->tx_skbuff[fep->skb_dirty] = NULL; - fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; + fep->tx_skbuff[index] = NULL; + + fep->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ if (status & BD_ENET_TX_WRAP) @@ -642,14 +694,12 @@ fec_enet_tx(struct net_device *ndev) /* Since we have freed up a buffer, the ring is no longer full */ - if (fep->tx_full) { - fep->tx_full = 0; + if (fep->dirty_tx != fep->cur_tx) { if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } } - fep->dirty_tx = bdp; - spin_unlock(&fep->hw_lock); + return; } @@ -816,7 +866,7 @@ fec_enet_interrupt(int irq, void *dev_id) int_events = readl(fep->hwp + FEC_IEVENT); writel(int_events, fep->hwp + FEC_IEVENT); - if (int_events & FEC_ENET_RXF) { + if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) { ret = IRQ_HANDLED; /* Disable the RX interrupt */ @@ -827,15 +877,6 @@ fec_enet_interrupt(int irq, void *dev_id) } } - /* Transmit OK, or non-fatal error. Update the buffer - * descriptors. FEC handles all errors, we just discover - * them as part of the transmit process. - */ - if (int_events & FEC_ENET_TXF) { - ret = IRQ_HANDLED; - fec_enet_tx(ndev); - } - if (int_events & FEC_ENET_MII) { ret = IRQ_HANDLED; complete(&fep->mdio_done); @@ -851,6 +892,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) int pkts = fec_enet_rx(ndev, budget); struct fec_enet_private *fep = netdev_priv(ndev); + fec_enet_tx(ndev); + if (pkts < budget) { napi_complete(napi); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -939,24 +982,28 @@ static void fec_enet_adjust_link(struct net_device *ndev) goto spin_unlock; } - /* Duplex link change */ if (phy_dev->link) { - if (fep->full_duplex != phy_dev->duplex) { - fec_restart(ndev, phy_dev->duplex); - /* prevent unnecessary second fec_restart() below */ + if (!fep->link) { fep->link = phy_dev->link; status_change = 1; } - } - /* Link on or off change */ - if (phy_dev->link != fep->link) { - fep->link = phy_dev->link; - if (phy_dev->link) + if (fep->full_duplex != phy_dev->duplex) + status_change = 1; + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* if any of the above changed restart the FEC */ + if (status_change) fec_restart(ndev, phy_dev->duplex); - else + } else { + if (fep->link) { fec_stop(ndev); - status_change = 1; + status_change = 1; + } } spin_unlock: @@ -1333,7 +1380,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) static void fec_enet_free_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int i; + unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -1357,7 +1404,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) static int fec_enet_alloc_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int i; + unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -1442,6 +1489,7 @@ fec_enet_close(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); /* Don't know what to do yet. 
*/ + napi_disable(&fep->napi); fep->opened = 0; netif_stop_queue(ndev); fec_stop(ndev); @@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc *cbd_base; - struct bufdesc *bdp; - int i; /* Allocate memory for buffer descriptors. */ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, @@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; } + memset(cbd_base, 0, PAGE_SIZE); spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1631,33 +1678,6 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { - - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); - } - - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); - bdp->cbd_sc |= BD_SC_WRAP; - fec_restart(ndev, 0); return 0; @@ -1782,24 +1802,6 @@ fec_probe(struct platform_device *pdev) fep->phy_interface = ret; } - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq < 0) { - if (i) - break; - ret = irq; - goto failed_irq; - } - ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); - if (ret) { - while (--i >= 0) { - irq = platform_get_irq(pdev, i); - free_irq(irq, ndev); - } - goto failed_irq; - } - } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); @@ -1850,6 +1852,24 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_init; + for (i = 0; i < FEC_IRQ_NUM; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + if (i) + break; + ret = irq; + goto failed_irq; + } + ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); + if (ret) { + while (--i >= 0) { + irq = platform_get_irq(pdev, i); + free_irq(irq, ndev); + } + goto failed_irq; + } + } + ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; @@ -1867,6 +1887,12 @@ failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_init: + for (i = 0; i < FEC_IRQ_NUM; i++) { + irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } +failed_irq: failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -1874,12 +1900,6 @@ failed_regulator: clk_disable_unprepare(fep->clk_ptp); failed_pin: failed_clk: - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } -failed_irq: iounmap(fep->hwp); failed_ioremap: free_netdev(ndev); @@ -1899,17 +1919,17 @@ fec_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); fec_enet_mii_remove(fep); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } del_timer_sync(&fep->time_keep); clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); 
clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); + for (i = 0; i < FEC_IRQ_NUM; i++) { + int irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } iounmap(fep->hwp); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 01579b8e37c..eb437296283 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -97,6 +97,13 @@ struct bufdesc { unsigned short cbd_sc; /* Control and status info */ unsigned long cbd_bufaddr; /* Buffer address */ }; +#else +struct bufdesc { + unsigned short cbd_sc; /* Control and status info */ + unsigned short cbd_datlen; /* Data length */ + unsigned long cbd_bufaddr; /* Buffer address */ +}; +#endif struct bufdesc_ex { struct bufdesc desc; @@ -107,14 +114,6 @@ struct bufdesc_ex { unsigned short res0[4]; }; -#else -struct bufdesc { - unsigned short cbd_sc; /* Control and status info */ - unsigned short cbd_datlen; /* Data length */ - unsigned long cbd_bufaddr; /* Buffer address */ -}; -#endif - /* * The following definitions courtesy of commproc.h, which where * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). @@ -214,8 +213,6 @@ struct fec_enet_private { unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; struct sk_buff *rx_skbuff[RX_RING_SIZE]; - ushort skb_cur; - ushort skb_dirty; /* CPM dual port RAM relative addresses */ dma_addr_t bd_dma; @@ -227,7 +224,6 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; - uint tx_full; /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ spinlock_t hw_lock; @@ -244,6 +240,7 @@ struct fec_enet_private { phy_interface_t phy_interface; int link; int full_duplex; + int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; int bufdesc_ex; diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 7f91b0c5c26..77943a6a1b8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -41,8 +41,8 @@ #include <asm/delay.h> #include <asm/mpc52xx.h> -#include <sysdev/bestcomm/bestcomm.h> -#include <sysdev/bestcomm/fec.h> +#include <linux/fsl/bestcomm/bestcomm.h> +#include <linux/fsl/bestcomm/fec.h> #include "fec_mpc52xx.h" diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 1f17ca0f220..0d8df400a47 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) spin_unlock_irqrestore(&fep->tmreg_lock, flags); } +EXPORT_SYMBOL(fec_ptp_start_cyclecounter); /** * fec_ptp_adjfreq - adjust ptp cycle frequency @@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } +EXPORT_SYMBOL(fec_ptp_ioctl); /** * fec_time_keep - call timecounter_read every second to avoid timer overrun @@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) pr_info("registered PHC device on %s\n", ndev->name); } } +EXPORT_SYMBOL(fec_ptp_init); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4b5e8a69248..d2c5441d1bf 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2906,21 +2906,23 @@ static void gfar_netpoll(struct net_device *dev) /* If the device has multiple interrupts, run tx/rx */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { for (i = 0; i < priv->num_grps; i++) { - disable_irq(priv->gfargrp[i].interruptTransmit); - disable_irq(priv->gfargrp[i].interruptReceive); - disable_irq(priv->gfargrp[i].interruptError); - gfar_interrupt(priv->gfargrp[i].interruptTransmit, - &priv->gfargrp[i]); - enable_irq(priv->gfargrp[i].interruptError); - enable_irq(priv->gfargrp[i].interruptReceive); - enable_irq(priv->gfargrp[i].interruptTransmit); + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + disable_irq(gfar_irq(grp, RX)->irq); + disable_irq(gfar_irq(grp, ER)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, ER)->irq); + enable_irq(gfar_irq(grp, RX)->irq); + enable_irq(gfar_irq(grp, TX)->irq); } } else { for (i = 0; i < priv->num_grps; i++) { - disable_irq(priv->gfargrp[i].interruptTransmit); - gfar_interrupt(priv->gfargrp[i].interruptTransmit, - &priv->gfargrp[i]); - enable_irq(priv->gfargrp[i].interruptTransmit); + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, TX)->irq); } } } diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig index c6a87625898..6231bc02b96 100644 --- a/drivers/net/ethernet/fujitsu/Kconfig +++ b/drivers/net/ethernet/fujitsu/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_FUJITSU bool "Fujitsu devices" default y - depends on ISA || PCMCIA || (ISA && EXPERIMENTAL) + depends on ISA || PCMCIA ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig index 955d929cd00..9521e68aa3b 100644 --- a/drivers/net/ethernet/i825xx/Kconfig +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -5,9 +5,7 @@ config NET_VENDOR_I825XX bool "Intel (82586/82593/82596) devices" default y - depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \ - ARCH_ACORN || SNI_RM || SUN3 || \ - GSC || BVME6000 || MVME16x || EXPERIMENTAL) + depends on NET_VENDOR_INTEL ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig index 3aff81d7989..5119ef18953 100644 --- a/drivers/net/ethernet/icplus/Kconfig +++ b/drivers/net/ethernet/icplus/Kconfig @@ -4,7 +4,7 @@ config IP1000 tristate "IP1000 Gigabit Ethernet support" - depends on PCI && EXPERIMENTAL + depends on PCI select NET_CORE select MII ---help--- diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3d5f6d46375..05f7264c51f 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -5,11 +5,6 @@ 
config NET_VENDOR_INTEL bool "Intel devices" default y - depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ - ARCH_ACORN || SNI_RM || SUN3 || \ - GSC || BVME6000 || MVME16x || \ - (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ - EXPERIMENTAL ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 2c1813737f6..f91a8f3f9d4 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -36,6 +36,7 @@ #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/mdio.h> +#include <linux/pm_runtime.h> #include "e1000.h" @@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, return 0; } +static int e1000e_ethtool_begin(struct net_device *netdev) +{ + return pm_runtime_get_sync(netdev->dev.parent); +} + +static void e1000e_ethtool_complete(struct net_device *netdev) +{ + pm_runtime_put_sync(netdev->dev.parent); +} + static const struct ethtool_ops e1000_ethtool_ops = { + .begin = e1000e_ethtool_begin, + .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index dff7bff8b8e..121a865c7fb 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -782,6 +782,59 @@ release: } /** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. 
+ **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + s32 ret_val = 0; + + if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { + u16 kmrn_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + return ret_val; +} + +/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * @@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; @@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + if (hw->phy.type == e1000_phy_i217) { - u16 phy_reg; + u16 phy_reg, device_id = hw->adapter->pdev->device; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + u32 fextnvm6 = er32(FEXTNVM6); + + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index b6d3174d7d2..8bf4655c2e1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -92,6 +92,8 @@ #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a177b8b65c4..948b86ffa4f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); adapter->idle_check = true; + hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); /* fire a link status change interrupt to start the watchdog */ @@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; + pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); @@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); + pm_runtime_put_sync(&adapter->pdev->dev); 
} else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults @@ -5887,8 +5890,7 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, - bool runtime) +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, } e1000e_reset_interrupt_capability(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; - status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, ew32(WUFC, 0); } - *enable_wake = !!wufc; - - /* make sure adapter isn't asleep if manageability is enabled */ - if ((adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - *enable_wake = true; - if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, */ e1000e_release_hw_control(adapter); - pci_disable_device(pdev); - - return 0; -} - -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) -{ - if (sleep && wake) { - pci_prepare_to_sleep(pdev); - return; - } - - pci_wake_from_d3(pdev, wake); - pci_set_power_state(pdev, PCI_D3hot); -} - -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, - bool wake) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev_priv(netdev); + pci_clear_master(pdev); /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To @@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - e1000_power_off(pdev, sleep, wake); + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); - } else { - e1000_power_off(pdev, sleep, wake); } + + return 0; } #ifdef CONFIG_PCIEASPM @@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); + pci_set_master(pdev); e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { @@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev) static int e1000_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __e1000_shutdown(pdev, &wake, false); - if (!retval) - e1000_complete_shutdown(pdev, true, wake); - return retval; + return __e1000_shutdown(pdev, false); } static int e1000_resume(struct device *dev) @@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (e1000e_pm_ready(adapter)) { - bool wake; - - __e1000_shutdown(pdev, &wake, true); - } + if (!e1000e_pm_ready(adapter)) + return 0; - return 0; + return __e1000_shutdown(pdev, true); } static int e1000_idle(struct device *dev) @@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev) static void e1000_shutdown(struct pci_dev *pdev) { - bool wake = false; - - __e1000_shutdown(pdev, &wake, false); - - if (system_state == SYSTEM_POWER_OFF) - e1000_complete_shutdown(pdev, false, wake); + __e1000_shutdown(pdev, false); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - pci_set_master(pdev); pdev->state_saved = true; pci_restore_state(pdev); + pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 794fe149766..a7e6a3e3725 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -42,6 +42,7 @@ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c 
b/drivers/net/ethernet/intel/igb/e1000_82575.c index 84e7e0909de..12b1d848080 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) switch (hw->phy.type) { case e1000_phy_i210: case e1000_phy_m88: - if (hw->phy.id == I347AT4_E_PHY_ID || - hw->phy.id == M88E1112_E_PHY_ID) + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); - else + break; + default: ret_val = igb_copper_link_setup_m88(hw); + break; + } break; case e1000_phy_igp_3: ret_val = igb_copper_link_setup_igp(hw); @@ -1813,27 +1818,32 @@ out: **/ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) { - u32 dtxswc; + u32 reg_val, reg_offset; switch (hw->mac.type) { case e1000_82576: + reg_offset = E1000_DTXSWC; + break; case e1000_i350: - dtxswc = rd32(E1000_DTXSWC); - if (enable) { - dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - /* The PF can spoof - it has to in order to - * support emulation mode NICs */ - dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); - } else { - dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | - E1000_DTXSWC_VLAN_SPOOF_MASK); - } - wr32(E1000_DTXSWC, dtxswc); + reg_offset = E1000_TXSWC; break; default: - break; + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); } + wr32(reg_offset, reg_val); } /** diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d27edbc6392..25151401c2a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -447,7 +447,7 @@ struct igb_adapter { #endif struct i2c_algo_bit_data i2c_algo; struct i2c_adapter i2c_adap; - struct igb_i2c_client_list *i2c_clients; + struct i2c_client *i2c_client; }; #define IGB_FLAG_HAS_MSI (1 << 0) diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0a9b073d0b0..0478a1abe54 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -39,6 +39,10 @@ #include <linux/pci.h> #ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + /* hwmon callback functions */ static ssize_t igb_hwmon_show_location(struct device *dev, struct device_attribute *attr, @@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter) unsigned int i; int n_attrs; int rc = 0; + struct i2c_client *client = NULL; /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) @@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter) if (rc) goto exit; + /* init i2c_client */ + client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); + if (client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device..\n"); + goto exit; + } + adapter->i2c_client = client; + /* Allocation space for max attributes * max num sensors * values (loc, temp, max, caution) */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 
ed79a1c53b5..8496adfc6a6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) return; } -static const struct i2c_board_info i350_sensor_info = { - I2C_BOARD_INFO("i350bb", 0Xf8), -}; - /* igb_init_i2c - Init I2C interface * @adapter: pointer to adapter structure * @@ -2546,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter) if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; - igb_enable_sriov(pdev, max_vfs); pci_sriov_set_totalvfs(pdev, 7); + igb_enable_sriov(pdev, max_vfs); #endif /* CONFIG_PCI_IOV */ } @@ -2656,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter) if (max_vfs > 7) { dev_warn(&pdev->dev, "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; + max_vfs = adapter->vfs_allocated_count = 7; } else adapter->vfs_allocated_count = max_vfs; if (adapter->vfs_allocated_count) @@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, /* If we spanned a buffer we have a huge mess so test for it */ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); - /* Guarantee this function can be used by verifying buffer sizes */ - BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD + - NET_IP_ALIGN + - IGB_TS_HDR_LEN + - ETH_FRAME_LEN + - ETH_FCS_LEN)); - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; page = rx_buffer->page; prefetchw(page); @@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) } } -static DEFINE_SPINLOCK(i2c_clients_lock); - -/* igb_get_i2c_client - returns matching client - * in adapters's client list. - * @adapter: adapter struct - * @dev_addr: device address of i2c needed. - */ -static struct i2c_client * -igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr) -{ - ulong flags; - struct igb_i2c_client_list *client_list; - struct i2c_client *client = NULL; - struct i2c_board_info client_info = { - I2C_BOARD_INFO("igb", 0x00), - }; - - spin_lock_irqsave(&i2c_clients_lock, flags); - client_list = adapter->i2c_clients; - - /* See if we already have an i2c_client */ - while (client_list) { - if (client_list->client->addr == (dev_addr >> 1)) { - client = client_list->client; - goto exit; - } else { - client_list = client_list->next; - } - } - - /* no client_list found, create a new one */ - client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC); - if (client_list == NULL) - goto exit; - - /* dev_addr passed to us is left-shifted by 1 bit - * i2c_new_device call expects it to be flush to the right. 
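The shift discussed in the removed helper's comment, like the 0Xf8 >> 1 in the new static i350_sensor_info table, is the usual conversion from an 8-bit read/write-style I2C address to the 7-bit address the Linux I2C core expects. A hedged illustration, assuming a sensor whose datasheet gives the 8-bit address 0xf8:

#include <linux/i2c.h>

/* Illustrative only: instantiate a client for a device advertised at 8-bit
 * address 0xf8; the I2C core wants the 7-bit form (0xf8 >> 1 == 0x7c).
 */
static struct i2c_client *demo_register_sensor(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("i350bb", 0xf8 >> 1),
	};

	return i2c_new_device(adap, &info);	/* NULL on failure */
}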
- */ - client_info.addr = dev_addr >> 1; - client_info.platform_data = adapter; - client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info); - if (client_list->client == NULL) { - dev_info(&adapter->pdev->dev, - "Failed to create new i2c device..\n"); - goto err_no_client; - } - - /* insert new client at head of list */ - client_list->next = adapter->i2c_clients; - adapter->i2c_clients = client_list; - - client = client_list->client; - goto exit; - -err_no_client: - kfree(client_list); -exit: - spin_unlock_irqrestore(&i2c_clients_lock, flags); - return client; -} - /* igb_read_i2c_byte - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read @@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = 0; @@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); - struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); + struct i2c_client *this_client = adapter->i2c_client; s32 status; u16 swfw_mask = E1000_SWFW_PHY0_SM; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0987822359f..0a237507ee8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter) case e1000_82576: snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; - adapter->ptp_caps.max_adj = 1000000000; + adapter->ptp_caps.max_adj = 999999881; adapter->ptp_caps.n_ext_ts = 0; adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index f4d2e9e3c6d..c3f1afd8690 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, union ixgbe_atr_input *mask = &adapter->fdir_mask; struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *rule = NULL; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (fsp->location <= rule->sw_idx) break; @@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *rule; int cnt = 0; /* report total rule count */ cmd->data = (1024 << adapter->fdir_pballoc) - 2; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { if (cnt == cmd->rule_cnt) return -EMSGSIZE; @@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2, *parent; - struct 
ixgbe_fdir_filter *rule; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule, *parent; int err = -EINVAL; parent = NULL; rule = NULL; - hlist_for_each_entry_safe(rule, node, node2, + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, fdir_node) { /* hash found, or no matching entry */ if (rule->sw_idx >= sw_idx) break; - parent = node; + parent = rule; } /* if there is an old rule occupying our place remove it */ @@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_after(parent, &input->fdir_node); + hlist_add_after(&parent->fdir_node, &input->fdir_node); else hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 68478d6dfa2..db5611ae407 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); @@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) if (!hlist_empty(&adapter->fdir_filter_list)) ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); - hlist_for_each_entry_safe(filter, node, node2, + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, @@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) { - struct hlist_node *node, *node2; + struct hlist_node *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); - hlist_for_each_entry_safe(filter, node, node2, + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c3db6cd69b6..2b6cb5ca48e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -944,9 +944,17 @@ free_queue_irqs: free_irq(adapter->msix_entries[vector].vector, adapter->q_vector[vector]); } - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; + /* This failure is non-recoverable - it indicates the system is + * out of MSIX vector resources and the VF driver cannot run + * without them. Set the number of msix vectors to zero + * indicating that not enough can be allocated. The error + * will be returned to the user indicating device open failed. + * Any further attempts to force the driver to open will also + * fail. The only way to recover is to unload the driver and + * reload it again. If the system has recovered some MSIX + * vectors then it may succeed. + */ + adapter->num_msix_vectors = 0; return err; } @@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int err; + /* A previous failure to open the device because of a lack of + * available MSIX vector resources may have reset the number + * of msix vectors variable to zero. 
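The policy both new ixgbevf comments describe is: once MSI-X vectors could not be obtained, record that fact and have ndo_open() refuse until the driver is reloaded. Schematically, with demo_* names standing in for the real ixgbevf structures:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Illustrative adapter state - not the real ixgbevf layout. */
struct demo_adapter {
	struct pci_dev *pdev;
	struct msix_entry msix_entries[4];
	int num_msix_vectors;		/* set to the desired count earlier */
};

/* Latch the failure: treat any inability to get the vectors as permanent
 * (partial allocation is treated as failure here for brevity).
 */
static int demo_request_msix(struct demo_adapter *adapter)
{
	int err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				  adapter->num_msix_vectors);
	if (err) {
		adapter->num_msix_vectors = 0;	/* remember: nothing allocated */
		return err;
	}
	return 0;
}

/* ...and refuse to open while no vectors were ever obtained. */
static int demo_open(struct net_device *netdev)
{
	struct demo_adapter *adapter = netdev_priv(netdev);

	if (!adapter->num_msix_vectors)
		return -ENOMEM;
	/* ... normal ring and interrupt bring-up ... */
	return 0;
}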
The only way to recover + * is to unload/reload the driver and hope that the system has + * been able to recover some MSIX vector resources. + */ + if (!adapter->num_msix_vectors) + return -ENOMEM; + /* disallow open during test */ if (test_bit(__IXGBEVF_TESTING, &adapter->state)) return -EBUSY; @@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev) err_req_irq: ixgbevf_down(adapter); - ixgbevf_free_irq(adapter); err_setup_rx: ixgbevf_free_all_rx_resources(adapter); err_setup_tx: diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a2127489af..bfdb0686039 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev) return 0; err_free: - kfree(dev); + free_netdev(dev); err_out: return err; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 29140502b71..6562c736a1d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq) /* mii management interface *************************************************/ +static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp) +{ + u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); + u32 autoneg_disable = FORCE_LINK_PASS | + DISABLE_AUTO_NEG_SPEED_GMII | + DISABLE_AUTO_NEG_FOR_FLOW_CTRL | + DISABLE_AUTO_NEG_FOR_DUPLEX; + + if (mp->phy->autoneg == AUTONEG_ENABLE) { + /* enable auto negotiation */ + pscr &= ~autoneg_disable; + goto out_write; + } + + pscr |= autoneg_disable; + + if (mp->phy->speed == SPEED_1000) { + /* force gigabit, half duplex not supported */ + pscr |= SET_GMII_SPEED_TO_1000; + pscr |= SET_FULL_DUPLEX_MODE; + goto out_write; + } + + pscr &= ~SET_GMII_SPEED_TO_1000; + + if (mp->phy->speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + else + pscr &= ~SET_MII_SPEED_TO_100; + + if (mp->phy->duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + else + pscr &= ~SET_FULL_DUPLEX_MODE; + +out_write: + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) { struct mv643xx_eth_shared_private *msp = dev_id; @@ -1499,6 +1538,7 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; if (mp->phy == NULL) return -EINVAL; @@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ cmd->advertising &= ~ADVERTISED_1000baseT_Half; - return phy_ethtool_sset(mp->phy, cmd); + ret = phy_ethtool_sset(mp->phy, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static void mv643xx_eth_get_drvinfo(struct net_device *dev, @@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev) static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; - if (mp->phy != NULL) - return phy_mii_ioctl(mp->phy, ifr, cmd); + if (mp->phy == NULL) + return -ENOTSUPP; - return -EOPNOTSUPP; + ret = phy_mii_ioctl(mp->phy, ifr, cmd); + if (!ret) + mv643xx_adjust_pscr(mp); + return ret; } static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b6025c305e1..cd345b8969b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ 
-12,7 +12,6 @@ */ #include <linux/kernel.h> -#include <linux/version.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 7e64033d7de..0706623cfb9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index e3c3d122979..fc27800e9c3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -64,7 +64,7 @@ static const char mlx4_en_version[] = /* Enable RSS UDP traffic */ MLX4_EN_PARM_INT(udp_rss, 1, - "Enable RSS for incomming UDP traffic or disabled (0)"); + "Enable RSS for incoming UDP traffic or disabled (0)"); /* Priority pausing */ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." @@ -198,7 +198,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) flush_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue); - mlx4_mr_free(dev, &mdev->mr); + (void) mlx4_mr_free(dev, &mdev->mr); iounmap(mdev->uar_map); mlx4_uar_free(dev, &mdev->priv_uar); mlx4_pd_free(dev, mdev->priv_pdn); @@ -303,7 +303,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) return mdev; err_mr: - mlx4_mr_free(dev, &mdev->mr); + (void) mlx4_mr_free(dev, &mdev->mr); err_map: if (!mdev->uar_map) iounmap(mdev->uar_map); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5088dc5c3d1..f278b10ef71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -225,11 +225,10 @@ static inline struct mlx4_en_filter * mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, __be16 src_port, __be16 dst_port) { - struct hlist_node *elem; struct mlx4_en_filter *filter; struct mlx4_en_filter *ret = NULL; - hlist_for_each_entry(filter, elem, + hlist_for_each_entry(filter, filter_hash_bucket(priv, src_ip, dst_ip, src_port, dst_port), filter_chain) { @@ -566,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int qpn = priv->base_qpn; - u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); - - en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", - priv->dev->dev_addr); - mlx4_unregister_mac(dev, priv->port, mac); + u64 mac; - if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + } else { struct mlx4_mac_entry *entry; - struct hlist_node *n, *tmp; + struct hlist_node *tmp; struct hlist_head *bucket; - unsigned int mac_hash; + unsigned int i; - mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; - bucket = &priv->mac_hash[mac_hash]; - hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { - if (ether_addr_equal_64bits(entry->mac, - priv->dev->dev_addr)) { - en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", - priv->port, priv->dev->dev_addr, qpn); + for (i = 0; i < 
MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + mac = mlx4_en_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, qpn, entry->reg_id); - mlx4_qp_release_range(dev, qpn, 1); + mlx4_unregister_mac(dev, priv->port, mac); hlist_del_rcu(&entry->hlist); kfree_rcu(entry, rcu); - break; } } + + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", + priv->port, qpn); + mlx4_qp_release_range(dev, qpn, 1); + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; } } @@ -609,11 +612,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, struct hlist_head *bucket; unsigned int mac_hash; struct mlx4_mac_entry *entry; - struct hlist_node *n, *tmp; + struct hlist_node *tmp; u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; - hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { if (ether_addr_equal_64bits(entry->mac, prev_mac)) { mlx4_en_uc_steer_release(priv, entry->mac, qpn, entry->reg_id); @@ -651,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr) return mac; } -static int mlx4_en_set_mac(struct net_device *dev, void *addr) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - struct sockaddr *saddr = addr; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - queue_work(mdev->workqueue, &priv->mac_task); - return 0; -} - -static void mlx4_en_do_set_mac(struct work_struct *work) +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) { - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - mac_task); - struct mlx4_en_dev *mdev = priv->mdev; int err = 0; - mutex_lock(&mdev->state_lock); if (priv->port_up) { /* Remove old MAC and insert the new one */ err = mlx4_en_replace_mac(priv, priv->base_qpn, @@ -684,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work) } else en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + return err; +} + +static int mlx4_en_set_mac(struct net_device *dev, void *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + + mutex_lock(&mdev->state_lock); + err = mlx4_en_do_set_mac(priv); mutex_unlock(&mdev->state_lock); + + return err; } static void mlx4_en_clear_list(struct net_device *dev) @@ -1019,7 +1023,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, { struct netdev_hw_addr *ha; struct mlx4_mac_entry *entry; - struct hlist_node *n, *tmp; + struct hlist_node *tmp; bool found; u64 mac; int err = 0; @@ -1035,7 +1039,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, /* find what to remove */ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { bucket = &priv->mac_hash[i]; - hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { found = false; netdev_for_each_uc_addr(ha, dev) { if (ether_addr_equal_64bits(entry->mac, @@ -1078,7 +1082,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, netdev_for_each_uc_addr(ha, dev) { found = false; bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; - hlist_for_each_entry(entry, n, bucket, 
hlist) { + hlist_for_each_entry(entry, bucket, hlist) { if (ether_addr_equal_64bits(entry->mac, ha->addr)) { found = true; break; @@ -1349,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { - queue_work(mdev->workqueue, &priv->mac_task); + mlx4_en_do_set_mac(priv); mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; } mutex_unlock(&mdev->state_lock); @@ -1633,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + /* Remove flow steering rules for the port*/ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ASSERT_RTNL(); + list_for_each_entry_safe(flow, tmp_flow, + &priv->ethtool_list, list) { + mlx4_flow_detach(mdev->dev, flow->id); + list_del(&flow->list); + } + } + mlx4_en_destroy_drop_qp(priv); /* Free TX Rings */ @@ -1653,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) mdev->mac_removed[priv->port] = 1; - /* Remove flow steering rules for the port*/ - if (mdev->dev->caps.steering_mode == - MLX4_STEERING_MODE_DEVICE_MANAGED) { - ASSERT_RTNL(); - list_for_each_entry_safe(flow, tmp_flow, - &priv->ethtool_list, list) { - mlx4_flow_detach(mdev->dev, flow->id); - list_del(&flow->list); - } - } - /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); @@ -1829,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); - if (!priv->dev->rx_cpu_rmap) - goto err; + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } #endif return 0; @@ -2067,7 +2073,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, err = -ENOMEM; goto out; } - priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS, + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_cq) { err = -ENOMEM; @@ -2079,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index ce38654bbdd..c7f856308e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/mlx4/qp.h> #include <linux/skbuff.h> +#include <linux/rculist.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> @@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (is_multicast_ether_addr(ethh->h_dest)) { struct mlx4_mac_entry *entry; - struct hlist_node *n; struct hlist_head *bucket; unsigned int mac_hash; @@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud mac_hash = 
ethh->h_source[MLX4_EN_MAC_HASH_IDX]; bucket = &priv->mac_hash[mac_hash]; rcu_read_lock(); - hlist_for_each_entry_rcu(entry, n, bucket, hlist) { + hlist_for_each_entry_rcu(entry, bucket, hlist) { if (ether_addr_equal_64bits(entry->mac, ethh->h_source)) { rcu_read_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 251ae2f9311..8e3123a1df8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_slave_event_eq_info *event_eq = priv->mfunc.master.slave_state[slave].event_eq; u32 in_modifier = vhcr->in_modifier; - u32 eqn = in_modifier & 0x1FF; + u32 eqn = in_modifier & 0x3FF; u64 in_param = vhcr->in_param; int err = 0; int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 38b62c78d5d..f6245579962 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -762,15 +762,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, u64 flags; int err = 0; u8 field; + u32 bmme_flags; err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; - /* add port mng change event capability unconditionally to slaves */ + /* add port mng change event capability and disable mw type 1 + * unconditionally to slaves + */ MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; + flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); /* For guests, report Blueflame disabled */ @@ -778,6 +782,19 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); + /* For guests, disable mw type 2 */ + MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; + MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + + /* turn off device-managed steering capability if not enabled */ + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(field, outbox->buf, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + } return 0; } @@ -1203,6 +1220,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) #define INIT_HCA_TPT_OFFSET 0x0f0 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) @@ -1319,6 +1337,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) /* TPT attributes */ MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); @@ -1415,6 +1434,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); + 
MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 3af33ff669c..151c2bb380a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -170,6 +170,7 @@ struct mlx4_init_hca_param { u8 log_mc_table_sz; u8 log_mpt_sz; u8 log_uar_sz; + u8 mw_enabled; /* Enable memory windows */ u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index b9dde139dac..16abde20e1f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1431,6 +1431,10 @@ static int mlx4_init_hca(struct mlx4_dev *dev) init_hca.log_uar_sz = ilog2(dev->caps.num_uars); init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca.mw_enabled = 0; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || + dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) + init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); if (err) @@ -1551,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ed4a6959e82..d738454116a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -60,6 +60,8 @@ #define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 #define MLX4_FS_NUM_MCG (1 << 17) +#define INIT_HCA_TPT_MW_ENABLE (1 << 7) + #define MLX4_NUM_UP 8 #define MLX4_NUM_TC 8 #define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ @@ -113,10 +115,10 @@ enum { MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT }; -enum mlx4_mr_state { - MLX4_MR_DISABLED = 0, - MLX4_MR_EN_HW, - MLX4_MR_EN_SW +enum mlx4_mpt_state { + MLX4_MPT_DISABLED = 0, + MLX4_MPT_EN_HW, + MLX4_MPT_EN_SW }; #define MLX4_COMM_TIME 10000 @@ -263,6 +265,22 @@ struct mlx4_icm_table { struct mlx4_icm **icm; }; +#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MLX4_MPT_FLAG_FREE (0x3UL << 28) +#define MLX4_MPT_FLAG_MIO (1 << 17) +#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) +#define MLX4_MPT_FLAG_REGION (1 << 8) + +#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) +#define MLX4_MPT_PD_FLAG_RAE (1 << 28) +#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) + +#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7) + +#define MLX4_MPT_STATUS_SW 0xF0 +#define MLX4_MPT_STATUS_HW 0x00 + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
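The "u64 in_param = 0" initializers sprinkled through these mlx4 hunks and the set_param_l() rewrite a few lines below are two halves of the same fix: the old helper stored only 32 bits through a cast, leaving the upper half of the mailbox argument uninitialized and, on big-endian machines, writing into the wrong half entirely. Roughly:

#include <linux/types.h>

/* Old behaviour: a 32-bit store through a cast. On little-endian this fills
 * the low half and leaves the high half as stack garbage; on big-endian it
 * clobbers the high half instead.
 */
static inline void set_param_l_old(u64 *arg, u32 val)
{
	*((u32 *)arg) = val;
}

/* Fixed behaviour: merge the new low 32 bits explicitly, preserving whatever
 * set_param_h() placed in the upper half.
 */
static inline void set_param_l_new(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (u64)val;
}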
*/ @@ -863,10 +881,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); -int __mlx4_mr_reserve(struct mlx4_dev *dev); -void __mlx4_mr_release(struct mlx4_dev *dev, u32 index); -int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index); -void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index); +int __mlx4_mpt_reserve(struct mlx4_dev *dev); +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index); +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); @@ -1217,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); static inline void set_param_l(u64 *arg, u32 val) { - *((u32 *)arg) = val; + *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; } static inline void set_param_h(u64 *arg, u32 val) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c313d7e943a..f710b7ce0dc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -509,7 +509,6 @@ struct mlx4_en_priv { struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct mac_task; struct work_struct watchdog_task; struct work_struct linkstate_task; struct delayed_work stats_task; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index c202d3ad2a0..f91719a08cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -44,20 +44,6 @@ #include "mlx4.h" #include "icm.h" -#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) -#define MLX4_MPT_FLAG_FREE (0x3UL << 28) -#define MLX4_MPT_FLAG_MIO (1 << 17) -#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) -#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) -#define MLX4_MPT_FLAG_REGION (1 << 8) - -#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) -#define MLX4_MPT_PD_FLAG_RAE (1 << 28) -#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) - -#define MLX4_MPT_STATUS_SW 0xF0 -#define MLX4_MPT_STATUS_HW 0x00 - static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) { int o; @@ -197,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -254,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -321,7 +307,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, mr->size = size; mr->pd = pd; mr->access = access; - mr->enabled = MLX4_MR_DISABLED; + mr->enabled = MLX4_MPT_DISABLED; mr->key = hw_index_to_key(mridx); return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); @@ -335,14 +321,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } -int __mlx4_mr_reserve(struct mlx4_dev *dev) +int __mlx4_mpt_reserve(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); } -static int mlx4_mr_reserve(struct mlx4_dev *dev) +static int mlx4_mpt_reserve(struct 
mlx4_dev *dev) { u64 out_param; @@ -353,19 +339,19 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev) return -1; return get_param_l(&out_param); } - return __mlx4_mr_reserve(dev); + return __mlx4_mpt_reserve(dev); } -void __mlx4_mr_release(struct mlx4_dev *dev, u32 index) +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { struct mlx4_priv *priv = mlx4_priv(dev); mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); } -static void mlx4_mr_release(struct mlx4_dev *dev, u32 index) +static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); @@ -376,19 +362,19 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index) index); return; } - __mlx4_mr_release(dev, index); + __mlx4_mpt_release(dev, index); } -int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; return mlx4_table_get(dev, &mr_table->dmpt_table, index); } -static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) +static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&param, index); @@ -397,19 +383,19 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } - return __mlx4_mr_alloc_icm(dev, index); + return __mlx4_mpt_alloc_icm(dev, index); } -void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; mlx4_table_put(dev, &mr_table->dmpt_table, index); } -static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) +static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, index); @@ -420,7 +406,7 @@ static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) index); return; } - return __mlx4_mr_free_icm(dev, index); + return __mlx4_mpt_free_icm(dev, index); } int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, @@ -429,41 +415,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, u32 index; int err; - index = mlx4_mr_reserve(dev); + index = mlx4_mpt_reserve(dev); if (index == -1) return -ENOMEM; err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, access, npages, page_shift, mr); if (err) - mlx4_mr_release(dev, index); + mlx4_mpt_release(dev, index); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); -static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) +static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) { int err; - if (mr->enabled == MLX4_MR_EN_HW) { + if (mr->enabled == MLX4_MPT_EN_HW) { err = mlx4_HW2SW_MPT(dev, NULL, key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); - if (err) - mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); + mlx4_warn(dev, "MR has MWs bound to it.\n"); + return err; + } - mr->enabled = MLX4_MR_EN_SW; + mr->enabled = MLX4_MPT_EN_SW; } mlx4_mtt_cleanup(dev, &mr->mtt); + + return 0; } -void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) { - mlx4_mr_free_reserved(dev, mr); + int ret; + + ret = mlx4_mr_free_reserved(dev, mr); + if (ret) + return ret; if (mr->enabled) - mlx4_mr_free_icm(dev,
key_to_hw_index(mr->key)); - mlx4_mr_release(dev, key_to_hw_index(mr->key)); + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_mpt_release(dev, key_to_hw_index(mr->key)); + + return 0; } EXPORT_SYMBOL_GPL(mlx4_mr_free); @@ -473,7 +470,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) struct mlx4_mpt_entry *mpt_entry; int err; - err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key)); + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key)); if (err) return err; @@ -520,7 +517,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); goto err_cmd; } - mr->enabled = MLX4_MR_EN_HW; + mr->enabled = MLX4_MPT_EN_HW; mlx4_free_cmd_mailbox(dev, mailbox); @@ -530,7 +527,7 @@ err_cmd: mlx4_free_cmd_mailbox(dev, mailbox); err_table: - mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_enable); @@ -657,6 +654,101 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, } EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); +int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, + struct mlx4_mw *mw) +{ + u32 index; + + if ((type == MLX4_MW_TYPE_1 && + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || + (type == MLX4_MW_TYPE_2 && + !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) + return -ENOTSUPP; + + index = mlx4_mpt_reserve(dev); + if (index == -1) + return -ENOMEM; + + mw->key = hw_index_to_key(index); + mw->pd = pd; + mw->type = type; + mw->enabled = MLX4_MPT_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mw_alloc); + +int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + + memset(mpt_entry, 0, sizeof(*mpt_entry)); + + /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned + * off, thus creating a memory window and not a memory region. 
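mlx4_mw_alloc() above and the enable/free routines that follow give upper layers a minimal memory-window life cycle. A sketch of how a consumer might drive it, assuming the struct mlx4_mw and MLX4_MW_TYPE_* declarations this series exposes in the mlx4 device header; demo_bind_window() is illustrative, not code from this patch:

#include <linux/mlx4/device.h>

static int demo_bind_window(struct mlx4_dev *dev, u32 pd)
{
	struct mlx4_mw mw;
	int err;

	err = mlx4_mw_alloc(dev, pd, MLX4_MW_TYPE_1, &mw);	/* reserves an MPT */
	if (err)
		return err;

	err = mlx4_mw_enable(dev, &mw);		/* SW2HW_MPT with FLAG_REGION off */
	if (err) {
		mlx4_mw_free(dev, &mw);		/* releases the reserved MPT */
		return err;
	}

	/* ... mw.key can now be handed out as the window's rkey ... */

	mlx4_mw_free(dev, &mw);			/* HW2SW_MPT + release the MPT */
	return 0;
}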
+ */ + mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); + mpt_entry->pd_flags = cpu_to_be32(mw->pd); + if (mw->type == MLX4_MW_TYPE_2) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV); + } + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + mw->enabled = MLX4_MPT_EN_HW; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mw_enable); + +void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + int err; + + if (mw->enabled == MLX4_MPT_EN_HW) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) + mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); + + mw->enabled = MLX4_MPT_EN_SW; + } + if (mw->enabled) + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + mlx4_mpt_release(dev, key_to_hw_index(mw->key)); +} +EXPORT_SYMBOL_GPL(mlx4_mw_free); + int mlx4_init_mr_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -831,7 +923,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, return 0; err_free: - mlx4_mr_free(dev, &fmr->mr); + (void) mlx4_mr_free(dev, &fmr->mr); return err; } EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); @@ -882,17 +974,21 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, err); return; } - fmr->mr.enabled = MLX4_MR_EN_SW; + fmr->mr.enabled = MLX4_MPT_EN_SW; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) { + int ret; + if (fmr->maps) return -EBUSY; - mlx4_mr_free(dev, &fmr->mr); - fmr->mr.enabled = MLX4_MR_DISABLED; + ret = mlx4_mr_free(dev, &fmr->mr); + if (ret) + return ret; + fmr->mr.enabled = MLX4_MPT_DISABLED; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 1ac88637ad9..00f223acada 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 719ead15e49..10c57c86388 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) { - u64 out_param; + u64 out_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&out_param, port); @@ -361,7 +361,7 @@ out: int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) { - u64 out_param; + u64 out_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -406,7 +406,7 @@ out: void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c 
b/drivers/net/ethernet/mellanox/mlx4/qp.c index 81e2abe07bb..e891b058c1b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) { - u64 in_param; + u64 in_param = 0; u64 out_param; int err; @@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) { - u64 in_param; + u64 in_param = 0; int err; if (mlx4_is_mfunc(dev)) { @@ -319,7 +319,7 @@ err_out: static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { - u64 param; + u64 param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&param, qpn); @@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, qpn); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5997adc943d..1391b52f443 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -99,6 +99,7 @@ struct res_qp { struct list_head mcg_list; spinlock_t mcg_spl; int local_qpn; + atomic_t ref_count; }; enum res_mtt_states { @@ -197,6 +198,7 @@ enum res_fs_rule_states { struct res_fs_rule { struct res_common com; + int qpn; }; static void *res_tracker_lookup(struct rb_root *root, u64 res_id) @@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev) return dev->caps.num_mpts - 1; } -static void *find_res(struct mlx4_dev *dev, int res_id, +static void *find_res(struct mlx4_dev *dev, u64 res_id, enum mlx4_resource type) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id) ret->local_qpn = id; INIT_LIST_HEAD(&ret->mcg_list); spin_lock_init(&ret->mcg_spl); + atomic_set(&ret->ref_count, 0); return &ret->com; } @@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id) return &ret->com; } -static struct res_common *alloc_fs_rule_tr(u64 id) +static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) { struct res_fs_rule *ret; @@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id) ret->com.res_id = id; ret->com.state = RES_FS_RULE_ALLOCATED; - + ret->qpn = qpn; return &ret->com; } @@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, ret = alloc_xrcdn_tr(id); break; case RES_FS_RULE: - ret = alloc_fs_rule_tr(id); + ret = alloc_fs_rule_tr(id, extra); break; default: return NULL; @@ -671,10 +674,14 @@ undo: static int remove_qp_ok(struct res_qp *res) { - if (res->com.state == RES_QP_BUSY) + if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || + !list_empty(&res->mcg_list)) { + pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", + res->com.state, atomic_read(&res->ref_count)); return -EBUSY; - else if (res->com.state != RES_QP_RESERVED) + } else if (res->com.state != RES_QP_RESERVED) { return -EPERM; + } return 0; } @@ -1231,14 +1238,14 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE: - index = __mlx4_mr_reserve(dev); + index = __mlx4_mpt_reserve(dev); if (index == -1) break; id = index & mpt_mask(dev); err = add_res_range(dev, slave, id, 1, RES_MPT, index);
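The resource-tracker additions above tie each device-managed flow-steering rule to the QP it targets: attaching a rule records the qpn in the rule and bumps a per-QP reference count, detaching drops it, and remove_qp_ok() now refuses to free a QP that still has rules (or multicast groups) hanging off it. Schematically, with demo_* names standing in for the tracker structures:

#include <linux/atomic.h>
#include <linux/errno.h>

struct demo_qp {
	atomic_t ref_count;		/* rules currently pointing at this QP */
};

struct demo_fs_rule {
	struct demo_qp *qp;		/* recorded at attach time */
};

static void demo_attach_rule(struct demo_fs_rule *rule, struct demo_qp *qp)
{
	rule->qp = qp;
	atomic_inc(&qp->ref_count);
}

static void demo_detach_rule(struct demo_fs_rule *rule)
{
	atomic_dec(&rule->qp->ref_count);
}

static int demo_remove_qp_ok(struct demo_qp *qp)
{
	return atomic_read(&qp->ref_count) ? -EBUSY : 0;
}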
if (err) { - __mlx4_mr_release(dev, index); + __mlx4_mpt_release(dev, index); break; } set_param_l(out_param, index); @@ -1251,7 +1258,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (err) return err; - err = __mlx4_mr_alloc_icm(dev, mpt->key); + err = __mlx4_mpt_alloc_icm(dev, mpt->key); if (err) { res_abort_move(dev, slave, RES_MPT, id); return err; @@ -1586,7 +1593,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); if (err) break; - __mlx4_mr_release(dev, index); + __mlx4_mpt_release(dev, index); break; case RES_OP_MAP_ICM: index = get_param_l(&in_param); @@ -1596,7 +1603,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (err) return err; - __mlx4_mr_free_icm(dev, mpt->key); + __mlx4_mpt_free_icm(dev, mpt->key); res_end_move(dev, slave, RES_MPT, id); return err; break; @@ -1796,6 +1803,26 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) return be32_to_cpu(mpt->mtt_sz); } +static u32 mr_get_pd(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; +} + +static int mr_is_fmr(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; +} + +static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; +} + +static int mr_is_region(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; +} + static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; @@ -1856,12 +1883,41 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; int phys; int id; + u32 pd; + int pd_slave; id = index & mpt_mask(dev); err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); if (err) return err; + /* Disable memory windows for VFs. */ + if (!mr_is_region(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + + /* Make sure that the PD bits related to the slave id are zeros. */ + pd = mr_get_pd(inbox->buf); + pd_slave = (pd >> 17) & 0x7f; + if (pd_slave != 0 && pd_slave != slave) { + err = -EPERM; + goto ex_abort; + } + + if (mr_is_fmr(inbox->buf)) { + /* FMR and Bind Enable are forbidden in slave devices. */ + if (mr_is_bind_enabled(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + /* FMR and Memory Windows are also forbidden. 
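The new SW2HW_MPT_wrapper() checks boil down to: a VF may not create memory windows or bind-enabled FMRs, and the protection domain it names must be its own, since in multifunction mode the PD carries the owning slave number in bits 17..23. The ownership test in isolation (the helper name is illustrative):

static int demo_pd_belongs_to_slave(u32 pd, int slave)
{
	int pd_slave = (pd >> 17) & 0x7f;

	/* PD is either unowned (0) or owned by this slave. */
	return pd_slave == 0 || pd_slave == slave;
}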
*/ + if (!mr_is_region(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + } + phys = mr_phys_mpt(inbox->buf); if (!phys) { err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); @@ -2941,6 +2997,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, u8 steer_type_mask = 2; enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; + if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0) + return -EINVAL; + qpn = vhcr->in_modifier & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) @@ -3072,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; int err; int qpn; + struct res_qp *rqp; struct mlx4_net_trans_rule_hw_ctrl *ctrl; struct _rule_hw *rule_header; int header_id; @@ -3082,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; - err = get_res(dev, slave, qpn, RES_QP, NULL); + err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); return err; @@ -3123,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, if (err) goto err_put; - err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { mlx4_err(dev, "Fail to add flow steering resources.\n "); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + goto err_put; } + atomic_inc(&rqp->ref_count); err_put: put_res(dev, slave, qpn, RES_QP); return err; @@ -3143,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { int err; + struct res_qp *rqp; + struct res_fs_rule *rrule; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) return -EOPNOTSUPP; + err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); + if (err) + return err; + /* Release the rule form busy state before removal */ + put_res(dev, slave, vhcr->in_param, RES_FS_RULE); + err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); + if (err) + return err; + err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { mlx4_err(dev, "Fail to remove flow steering resources.\n "); - return err; + goto out; } err = mlx4_cmd(dev, vhcr->in_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (!err) + atomic_dec(&rqp->ref_count); +out: + put_res(dev, slave, rrule->qpn, RES_QP); return err; } @@ -3480,7 +3557,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) while (state != 0) { switch (state) { case RES_MPT_RESERVED: - __mlx4_mr_release(dev, mpt->key); + __mlx4_mpt_release(dev, mpt->key); spin_lock_irq(mlx4_tlock(dev)); rb_erase(&mpt->com.node, &tracker->res_tree[RES_MPT]); @@ -3491,7 +3568,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) break; case RES_MPT_MAPPED: - __mlx4_mr_free_icm(dev, mpt->key); + __mlx4_mpt_free_icm(dev, mpt->key); state = RES_MPT_RESERVED; break; @@ -3754,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); /*VLAN*/ rem_slave_macs(dev, slave); + rem_slave_fs_rule(dev, slave); rem_slave_qps(dev, slave); rem_slave_srqs(dev, slave); rem_slave_cqs(dev, slave); @@ -3762,6 +3840,5 @@ void 
mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_mtts(dev, slave); rem_slave_counters(dev, slave); rem_slave_xrcdns(dev, slave); - rem_slave_fs_rule(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index feda6c00829..e329fe1f11b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) { - u64 in_param; + u64 in_param = 0; if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, srqn); diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 8163fd0f453..afaf0c07f37 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_MICROCHIP bool "Microchip devices" default y - depends on SPI && EXPERIMENTAL + depends on SPI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +20,7 @@ if NET_VENDOR_MICROCHIP config ENC28J60 tristate "ENC28J60 support" - depends on SPI && EXPERIMENTAL + depends on SPI select CRC32 ---help--- Support for the Microchip EN28J60 ethernet chip. diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig index f157334579f..a100860d45e 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig @@ -5,9 +5,6 @@ config NET_VENDOR_NATSEMI bool "National Semi-conductor devices" default y - depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \ - ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \ - PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index c4122c86f82..efa29b712d5 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, ndev); - if (lpc_mii_init(pldat) != 0) + ret = lpc_mii_init(pldat); + if (ret) goto err_out_unregister_netdev; netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 39ab4d09faa..73ce7dd6b95 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) - skb->ip_summed = CHECKSUM_NONE; - else skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; napi_gro_receive(&adapter->napi, skb); (*work_done)++; diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index 8f29feb3554..cbbeca3f8c5 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -32,8 +32,8 @@ config HAMACHI called hamachi. 
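In the pch_gbe hunk above, the receive path had its checksum status test inverted. PCH_GBE_RXD_ACC_STAT_TCPIPOK is the descriptor bit saying the hardware already verified the TCP/IP checksums, and only then may the driver tell the stack to skip its own check. A small illustrative helper (the real driver does this inline, and the status bit value is not shown here):

#include <linux/skbuff.h>

static void demo_set_rx_csum(struct sk_buff *skb, u32 status, u32 ok_bit)
{
	if (status & ok_bit)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware validated it */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* stack verifies in software */
}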
config YELLOWFIN - tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "Packet Engines Yellowfin Gigabit-NIC support" + depends on PCI select CRC32 ---help--- Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 0be5844d637..b1cfbb75ff1 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -579,8 +579,9 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) (TX_RING_SIZE-1)].dma; freed = pasemi_mac_unmap_tx_skb(mac, nfrags, info->skb, dmas); - } else + } else { freed = 2; + } } kfree(txring->ring_info); @@ -808,8 +809,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum = (macrx & XCT_MACRX_CSUM_M) >> XCT_MACRX_CSUM_S; - } else + } else { skb_checksum_none_assert(skb); + } packets++; tot_bytes += len; @@ -1829,10 +1831,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", err); goto out; - } else if netif_msg_probe(mac) + } else if (netif_msg_probe(mac)) { printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n", dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", mac->dma_if, dev->dev_addr); + } return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 11c3db6daff..ba3c72fce1f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 1 -#define _QLCNIC_LINUX_SUBVERSION 34 -#define QLCNIC_LINUX_VERSIONID "5.1.34" +#define _QLCNIC_LINUX_SUBVERSION 35 +#define QLCNIC_LINUX_VERSIONID "5.1.35" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -1755,7 +1755,7 @@ static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { - return adapter->ahw->hw_ops->config_loopback(adapter, mode); + return adapter->ahw->hw_ops->clear_loopback(adapter, mode); } static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index c53832b02b3..5c033f268ca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -589,13 +589,6 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) qlcnic_83xx_register_nic_idc_func(adapter, 1); qlcnic_83xx_enable_mbx_intrpt(adapter); - if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { - if (qlcnic_83xx_config_intrpt(adapter, 1)) { - netdev_err(adapter->netdev, - "Failed to enable mbx intr\n"); - return -EIO; - } - } if (qlcnic_83xx_configure_opmode(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 325e11e1ce0..f89cc7a3fe6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) void qlcnic_prune_lb_filters(struct qlcnic_adapter 
*adapter) { struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; + struct hlist_node *n; struct hlist_head *head; int i; unsigned long time; @@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; time = tmp_fil->ftime; @@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) { head = &(adapter->rx_fhash.fhead[i]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { time = tmp_fil->ftime; if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) { @@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) { struct qlcnic_filter *tmp_fil; - struct hlist_node *tmp_hnode, *n; + struct hlist_node *n; struct hlist_head *head; int i; u8 cmd; for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; qlcnic_sre_macaddr_change(adapter, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 6387e0cc3ea..0e630061bff 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, { struct ethhdr *phdr = (struct ethhdr *)(skb->data); struct qlcnic_filter *fil, *tmp_fil; - struct hlist_node *tmp_hnode, *n; + struct hlist_node *n; struct hlist_head *head; unsigned long time; u64 src_addr = 0; @@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, (adapter->fhash.fbucket_size - 1); head = &(adapter->rx_fhash.fhead[hindex]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && tmp_fil->vlan_id == vlan_id) { time = tmp_fil->ftime; @@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, (adapter->fhash.fbucket_size - 1); head = &(adapter->rx_fhash.fhead[hindex]); spin_lock(&adapter->rx_mac_learn_lock); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) && tmp_fil->vlan_id == vlan_id) { found = 1; @@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb) { struct qlcnic_filter *fil, *tmp_fil; - struct hlist_node *tmp_hnode, *n; + struct hlist_node *n; struct hlist_head *head; struct net_device *netdev = adapter->netdev; struct ethhdr *phdr = (struct ethhdr *)(skb->data); @@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1); head = &(adapter->fhash.fhead[hindex]); - hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) { + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (!memcmp(tmp_fil->faddr, 
&src_addr, ETH_ALEN) && tmp_fil->vlan_id == vlan_id) { if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 5d5fd06c4b4..28a6d483836 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1150,7 +1150,7 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) } if (!npar_opt_timeo) { dev_err(&adapter->pdev->dev, - "Waiting for NPAR state to opertional timeout\n"); + "Waiting for NPAR state to operational timeout\n"); return -EIO; } return 0; diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 5821966f9f2..783fa8b5cde 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -34,8 +34,8 @@ config ATP will be called atp. config 8139CP - tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" + depends on PCI select CRC32 select NET_CORE select MII diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8900398ba10..28fb50a1e9c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp) RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); - rtl_tx_performance_tweak(pdev, - (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } } static void rtl_hw_start_8168bef(struct rtl8169_private *tp) @@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); rtl_disable_clock_request(pdev); @@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) RTL_W8(MaxTxPacketSize, TxPacketMax); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) RTL_W8(MaxTxPacketSize, TxPacketMax); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); } @@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W8(MaxTxPacketSize, TxPacketMax); @@ -4972,7 
+4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); RTL_W8(MaxTxPacketSize, TxPacketMax); @@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); - rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33e96176e4d..bf5e3cf97c4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2220,6 +2220,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) /* MDIO bus release function */ static int sh_mdio_release(struct net_device *ndev) { + struct sh_eth_private *mdp = netdev_priv(ndev); struct mii_bus *bus = dev_get_drvdata(&ndev->dev); /* unregister mdio bus */ @@ -2234,6 +2235,9 @@ static int sh_mdio_release(struct net_device *ndev) /* free bitbang info */ free_mdio_bitbang(bus); + /* free bitbang memory */ + kfree(mdp->bitbang); + return 0; } @@ -2262,6 +2266,7 @@ static int sh_mdio_init(struct net_device *ndev, int id, bitbang->ctrl.ops = &bb_ops; /* MII controller setting */ + mdp->bitbang = bitbang; mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!mdp->mii_bus) { ret = -ENOMEM; @@ -2441,6 +2446,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } mdp->tsu_addr = ioremap(rtsu->start, resource_size(rtsu)); + if (mdp->tsu_addr == NULL) { + ret = -ENOMEM; + dev_err(&pdev->dev, "TSU ioremap failed.\n"); + goto out_release; + } mdp->port = devno % 2; ndev->features = NETIF_F_HW_VLAN_FILTER; } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index bae84fd2e73..e6655678458 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -705,6 +705,7 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; + struct bb_info *bitbang; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig index a71e1ec068e..11f168e46eb 100644 --- a/drivers/net/ethernet/seeq/Kconfig +++ b/drivers/net/ethernet/seeq/Kconfig @@ -6,7 +6,6 @@ config NET_VENDOR_SEEQ bool "SEEQ devices" default y depends on HAS_IOMEM - depends on (ARM && ARCH_ACORN) || SGI_HAS_SEEQ || EXPERIMENTAL ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index bf57b3cb16a..0bc00991d31 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) tx_queue->txd.entries); } + efx_device_detach_sync(efx); efx_stop_all(efx); efx_stop_interrupts(efx, true); @@ -832,6 +833,7 @@ out: efx_start_interrupts(efx, true); efx_start_all(efx); + netif_device_attach(efx->net_dev); return rc; rollback: @@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx) /* Flush 
efx_mac_work(), refill_workqueue, monitor_work */ efx_flush_all(efx); - /* Stop the kernel transmit interface late, so the watchdog - * timer isn't ticking over the flush */ + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); netif_tx_disable(efx->net_dev); efx_stop_datapath(efx); @@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) if (new_mtu > EFX_MAX_MTU) return -EINVAL; - efx_stop_all(efx); - netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + efx_device_detach_sync(efx); + efx_stop_all(efx); + mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); + netif_device_attach(efx->net_dev); return 0; } diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 50247dfe8f5..d2f790df6dc 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) * TX scheduler is stopped when we're done and before * netif_device_present() becomes false. */ - netif_tx_lock(dev); + netif_tx_lock_bh(dev); netif_device_detach(dev); - netif_tx_unlock(dev); + netif_tx_unlock_bh(dev); } #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 2d756c1d714..0a90abd2421 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -210,6 +210,7 @@ struct efx_tx_queue { * Will be %NULL if the buffer slot is currently free. * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. * Will be %NULL if the buffer slot is currently free. + * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. * @len: Buffer length, in bytes. * @flags: Flags for buffer and packet state. */ @@ -219,7 +220,8 @@ struct efx_rx_buffer { struct sk_buff *skb; struct page *page; } u; - unsigned int len; + u16 page_offset; + u16 len; u16 flags; }; #define EFX_RX_BUF_PAGE 0x0001 diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 0ad790cc473..eaa8e874a3c 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) return false; tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 + && tx_queue->write_count - write_count == 1; } /* For each entry inserted into the software descriptor ring, create a diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d780a0d096b..bb579a6128c 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold; static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, struct efx_rx_buffer *buf) { - /* Offset is always within one page, so we don't need to consider - * the page order. 
- */ - return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) + - efx->type->rx_buffer_hash_size; + return buf->page_offset + efx->type->rx_buffer_hash_size; } static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) { @@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; struct page *page; + unsigned int page_offset; struct efx_rx_page_state *state; dma_addr_t dma_addr; unsigned index, count; @@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) state->dma_addr = dma_addr; dma_addr += sizeof(struct efx_rx_page_state); + page_offset = sizeof(struct efx_rx_page_state); split: index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->u.page = page; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->flags = EFX_RX_BUF_PAGE; ++rx_queue->added_count; @@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) /* Use the second half of the page */ get_page(page); dma_addr += (PAGE_SIZE >> 1); + page_offset += (PAGE_SIZE >> 1); ++count; goto split; } @@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) } static void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf) + struct efx_rx_buffer *rx_buf, + unsigned int used_len) { if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { struct efx_rx_page_state *state; @@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, state->dma_addr, efx_rx_buf_size(efx), DMA_FROM_DEVICE); + } else if (used_len) { + dma_sync_single_for_cpu(&efx->pci_dev->dev, + rx_buf->dma_addr, used_len, + DMA_FROM_DEVICE); } } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, @@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx, static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) { - efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); efx_free_rx_buffer(rx_queue->efx, rx_buf); } @@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, goto out; } - /* Release card resources - assumes all RX buffers consumed in-order - * per RX queue + /* Release and/or sync DMA mapping - assumes all RX buffers + * consumed in-order per RX queue */ - efx_unmap_rx_buffer(efx, rx_buf); + efx_unmap_rx_buffer(efx, rx_buf, len); /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. 
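Note: the sfc/rx.c hunk above stops unmapping every page-backed RX buffer on receive and instead syncs only the bytes the NIC actually wrote, keeping the DMA mapping alive so the page can be recycled. A minimal sketch of that pattern follows; it is illustrative only, and the demo_rx_buf structure and demo_ helper are hypothetical names, not code from this patch.

#include <linux/types.h>
#include <linux/dma-mapping.h>

struct demo_rx_buf {
	dma_addr_t dma_addr;	/* bus address of the packet data */
	u16 page_offset;	/* offset of packet data within the page */
	u16 len;		/* buffer length in bytes */
};

static void demo_rx_buf_sync_for_cpu(struct device *dev,
				     struct demo_rx_buf *buf,
				     unsigned int used_len)
{
	/* Make only the bytes the hardware wrote visible to the CPU.
	 * The mapping itself is not torn down, so the page can be
	 * handed back to the RX ring without remapping.
	 */
	dma_sync_single_for_cpu(dev, buf->dma_addr, used_len,
				DMA_FROM_DEVICE);
}

Tracking page_offset explicitly (rather than deriving it from dma_addr) is what allows the second half of a split page to be described correctly when the page is shared between two RX slots.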
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig index ae1ce170864..3409b3f97a1 100644 --- a/drivers/net/ethernet/silan/Kconfig +++ b/drivers/net/ethernet/silan/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_SILAN bool "Silan devices" default y - depends on PCI && EXPERIMENTAL + depends on PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -19,8 +19,8 @@ config NET_VENDOR_SILAN if NET_VENDOR_SILAN config SC92031 - tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL + tristate "Silan SC92031 PCI Fast Ethernet Adapter driver" + depends on PCI select CRC32 ---help--- This is a driver for the Fast Ethernet PCI network cards based on diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 1164930a40a..c0ea838c78d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -26,8 +26,8 @@ config STMMAC_PLATFORM If unsure, say N. config STMMAC_PCI - bool "STMMAC PCI bus support (EXPERIMENTAL)" - depends on STMMAC_ETH && PCI && EXPERIMENTAL + bool "STMMAC PCI bus support" + depends on STMMAC_ETH && PCI ---help--- This is to select the Synopsys DWMAC available on PCI devices, if you have a controller with this interface, say Y or M here. diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index 57bfd859967..3074aa374c6 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -32,8 +32,8 @@ config HAPPYMEAL will be called sunhme. config SUNBMAC - tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)" - depends on SBUS && EXPERIMENTAL + tristate "Sun BigMAC 10/100baseT support" + depends on SBUS select CRC32 ---help--- This driver supports the "be" interface available as an Sbus option. @@ -61,7 +61,7 @@ config SUNGEM select SUNGEM_PHY ---help--- Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also - <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>. + <http://docs.oracle.com/cd/E19455-01/806-3985-10/806-3985-10.pdf>. config CASSINI tristate "Sun Cassini support" @@ -69,7 +69,7 @@ config CASSINI select CRC32 ---help--- Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also - <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf> + <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>. config SUNVNET tristate "Sun Virtual Network support" diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 289b4eefb42..1df0ff3839e 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) { unsigned int hash = vnet_hashfn(skb->data); struct hlist_head *hp = &vp->port_hash[hash]; - struct hlist_node *n; struct vnet_port *port; - hlist_for_each_entry(port, n, hp, hash) { + hlist_for_each_entry(port, hp, hash) { if (ether_addr_equal(port->raddr, skb->data)) return port; } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 4426151d4ac..de71b1ec462 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -88,8 +88,8 @@ config TLAN Please email feedback to <torben.mathiasen@compaq.com>. 
config CPMAC - tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)" - depends on EXPERIMENTAL && AR7 + tristate "TI AR7 CPMAC Ethernet support" + depends on AR7 select PHYLIB ---help--- TI AR7 CPMAC Ethernet support diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7e93df6585e..80cad06e5eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); cpts_tx_timestamp(priv->cpts, skb); priv->stats.tx_packets++; priv->stats.tx_bytes += len; @@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) writel(vlan, &priv->host_port_regs->port_vlan); - for (i = 0; i < 2; i++) + for (i = 0; i < priv->data.slaves; i++) slave_write(priv->slaves + i, vlan, reg); cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, @@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(cpdma_check_free_tx_desc(priv->txch))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *mdio; parp = of_get_property(slave_node, "phy_id", &lenp); - if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { + if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { pr_err("Missing slave[%d] phy_id property\n", i); ret = -EINVAL; goto error_ret; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 68c3418160b..ee13dc78430 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -492,11 +492,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } +EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler) @@ -1028,3 +1030,4 @@ unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } +EXPORT_SYMBOL_GPL(cpdma_control_set); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 52c05366599..72300bc9e37 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) * queue is stopped then start the queue as we have free desc for tx */ if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; |
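Note: the cpsw and davinci_emac hunks above restore the usual TX stop/wake pairing (stop the queue when descriptors run out, wake it from the completion handler) and fix the inverted cpdma_check_free_tx_desc() test. A minimal sketch of that pairing is shown below, assuming hypothetical demo_ helpers in place of the drivers' real DMA-ring queries; it is not code from this patch.

#include <linux/netdevice.h>

static bool demo_free_tx_desc_available(struct net_device *ndev)
{
	/* Hypothetical helper; a real driver queries its DMA ring here. */
	return true;
}

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	/* ... hand the skb to the DMA channel here ... */

	/* Stop the queue once no free TX descriptors remain, so the
	 * stack stops handing us frames we cannot queue.
	 */
	if (unlikely(!demo_free_tx_desc_available(ndev)))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static void demo_tx_complete(struct net_device *ndev)
{
	/* netif_wake_queue(), unlike netif_start_queue(), also
	 * reschedules the queue so frames queued while it was
	 * stopped are sent promptly.
	 */
	if (unlikely(netif_queue_stopped(ndev)))
		netif_wake_queue(ndev);
}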