author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 10:03:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 10:03:46 -0700
commit     cb62ab71fe2b16e8203a0f0a2ef4eda23d761338 (patch)
tree       536ba39658e47d511a489c52f7aac60cd78967e5 /drivers/net/ethernet/intel/e1000e
parent     31ed8e6f93a27304c9e157dab0267772cd94eaad (diff)
parent     74863948f925d9f3bb4e3d3a783e49e9c662d839 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
1) Get rid of the error-prone NLA_PUT*() macros that used an embedded
goto (a sketch of the replacement pattern follows this list).
2) Kill off the token-ring and MCA networking drivers, from Paul
Gortmaker.
3) Reduce high-order allocations made by datagram AF_UNIX sockets, from
Eric Dumazet.
4) Add PTP hardware clock support to IGB and IXGBE, from Richard
Cochran and Jacob Keller.
5) Allow users to query timestamping capabilities of a card via
ethtool, from Richard Cochran.
6) Add loadbalance mode to the teaming driver, from Jiri Pirko. Part
of this is that we can now have BPF filters not attached to sockets,
and the loadbalancing function is calculated using one (see the
unattached-filter sketch after this list).
7) Francois Romieu went through the network drivers removing gratuitous
uses of netdev->base_addr, perhaps some day we can remove it
completely but it's used for ISA probing still.
8) Add a BPF JIT for sparc. I know, who cares, right? :-)
9) Move networking sysctl registry away from using the compatibility
mode interfaces in the sysctl code. From Eric W. Biederman.
10) Pavel Emelyanov added a way to save and restore TCP socket state via
the TCP_REPAIR, TCP_REPAIR_QUEUE, and TCP_QUEUE_SEQ socket options, as
well as a way to forcefully bind a socket to a port via the
sk->sk_reuse value SK_FORCE_REUSE. There is also a TCP_REPAIR_OPTIONS
option which allows reinstating the TCP options enabled on the
connection (a userspace sketch follows this list).
11) Several enhancements from Eric Dumazet that, in particular, can
enhance splice performance on TCP sockets significantly.
a) Reset the offset of the per-socket sendmsg page when we know
we're the only user of the page in linear_to_page().
b) Add facilities such that skb->data can be backed by a page rather
than SLAB kmalloc'd memory. In particular, devices which were
receiving into linear RX buffers can now end up providing paged
data.
The big result is that code paths like splice and GRO no longer have
to copy.
12) Allow a pure sender to more gracefully handle ACK backlogs in TCP.
What can happen at high rates is that the sender hasn't grown his
receive buffer limits at all (he's not receiving data so really
doesn't need to), but the non-data ACKs consume receive buffer
space.
sk_add_backlog() is too aggressive in dropping frames in this case,
so relax its requirements by using the receive buffer plus the send
buffer limit as the backlog limit instead of just the former (see
the sketch after this list). Also from Eric Dumazet.
13) Add ipv6 support to L2TP, from Benjamin LaHaise, James Chapman, and
Chris Elston.
14) Implement TCP early retransmit (RFC 5827), from Yuchung Cheng.
Basically, we can start fast retransmit before hitting the dupack
threshold under certain conditions (sketched after this list).
15) New CODEL active queue management packet scheduler, from Eric
Dumazet, based upon initial work by Dave Taht.
Basically, the big feature is that packets are dropped (or ECN bits
are set) based upon how long packets live in the queue, rather than
the queue length (which is what RED uses). A sketch of the control
law appears after this list.
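
A minimal sketch for item 1) above, assuming a typical netlink dump path
(the function name and the IFLA_MTU attribute are only illustrative): the
removed NLA_PUT_U32() macro hid a "goto nla_put_failure" inside itself,
while the nla_put_u32() helper returns an error the caller checks
explicitly.

    #include <linux/rtnetlink.h>
    #include <net/netlink.h>

    /* Hypothetical dump helper illustrating the new attribute-put style. */
    static int example_fill_info(struct sk_buff *skb,
                                 const struct net_device *dev)
    {
            /* nla_put_u32() returns non-zero when the skb runs out of
             * tailroom, and the goto is now written out by the caller
             * (the old NLA_PUT_U32() macro hid this same goto inside
             * the macro body).
             */
            if (nla_put_u32(skb, IFLA_MTU, dev->mtu))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }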
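For item 6), a rough sketch of how a socket-less ("unattached") classic
BPF filter can be built and run against an skb, which is broadly what the
team driver's loadbalance mode does with a user-supplied program. The
sk_unattached_filter_create()/SK_RUN_FILTER() names reflect the 3.x-era
API as I recall it, and the two-instruction program is purely
illustrative; treat the exact details as an assumption.

    #include <linux/kernel.h>
    #include <linux/filter.h>
    #include <linux/skbuff.h>

    /* Sketch (assumed 3.x-era API): an unattached classic-BPF filter whose
     * return value is used as a hash/verdict for an skb, no socket involved.
     */
    static struct sk_filter *example_fp;

    static int example_load_filter(void)
    {
            /* Trivial program: load the packet length and return it. */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
                    BPF_STMT(BPF_RET | BPF_A, 0),
            };
            struct sock_fprog fprog = {
                    .len    = ARRAY_SIZE(insns),
                    .filter = insns,
            };

            return sk_unattached_filter_create(&example_fp, &fprog);
    }

    static unsigned int example_classify(struct sk_buff *skb)
    {
            /* The team lb mode feeds a result like this into port choice. */
            return SK_RUN_FILTER(example_fp, skb);
    }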
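For item 10), a hedged userspace sketch of the repair-mode flow: put a TCP
socket into repair mode, select a queue, and read its sequence number.
The constants come from linux/tcp.h; error handling and the surrounding
checkpoint/restore logic are omitted, and this is illustrative rather
than the canonical checkpointing procedure.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>   /* TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ */

    int main(void)
    {
            int on = 1, queue = TCP_SEND_QUEUE;
            unsigned int seq = 0;
            socklen_t len = sizeof(seq);
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            /* Enter repair mode (requires CAP_NET_ADMIN). */
            setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));

            /* Pick the send queue and read its current sequence number;
             * writing TCP_QUEUE_SEQ on a fresh repair-mode socket sets it
             * instead, which is how state gets restored. */
            setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &queue, sizeof(queue));
            getsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &seq, &len);
            printf("send queue seq: %u\n", seq);
            return 0;
    }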
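For item 12), the gist in code form. This is a sketch modeled on the TCP
receive path of that era, not a verbatim copy of tcp_v4_rcv(): the helper
name is hypothetical and locking/statistics are trimmed. The point is
that sk_add_backlog() grew a limit argument and TCP passes the sum of the
receive and send buffer sizes rather than the receive buffer alone.

    #include <net/tcp.h>

    /* Hypothetical receive-path fragment mirroring the relaxed check: a pure
     * sender has a tiny rcvbuf but a large sndbuf, and the combined limit
     * lets it absorb bursts of bare ACKs instead of dropping them.
     */
    static int example_queue_or_backlog(struct sock *sk, struct sk_buff *skb)
    {
            int rc = 0;

            bh_lock_sock_nested(sk);
            if (!sock_owned_by_user(sk))
                    rc = tcp_v4_do_rcv(sk, skb);
            else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
                    rc = -ENOBUFS;  /* backlog full even with the relaxed
                                     * rcvbuf + sndbuf limit: drop */
            bh_unlock_sock(sk);
            return rc;
    }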
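For item 14), a small sketch of the RFC 5827 threshold rule (this follows
the RFC, not the kernel implementation, and the names are illustrative):
when fewer than four segments are outstanding and there is no new data to
send, the duplicate-ACK threshold drops to outstanding - 1, so a tail
loss can be repaired without waiting for three dupacks or an RTO.

    /* Sketch of the RFC 5827 Early Retransmit trigger. */
    static int should_early_retransmit(unsigned int segs_outstanding,
                                       unsigned int dupacks_seen,
                                       int have_new_data_to_send)
    {
            unsigned int thresh;

            /* Early Retransmit only applies to small outstanding windows
             * with nothing new to transmit; otherwise the normal 3-dupack
             * rule holds.
             */
            if (segs_outstanding >= 4 || have_new_data_to_send)
                    return dupacks_seen >= 3;

            thresh = segs_outstanding > 1 ? segs_outstanding - 1 : 1;
            return dupacks_seen >= thresh;
    }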
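For item 15), a compact userspace sketch of the CoDel control law
described above. The constants and structure follow the published
algorithm, not the kernel's sch_codel.c, and the state handling is
simplified: each packet is stamped on enqueue, and at dequeue the
scheduler looks at how long the packet sat in the queue; if that sojourn
time stays above a small target for a whole interval, dropping starts,
with each successive drop scheduled sooner by an inverse-square-root law.

    #include <math.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CODEL_TARGET_US    5000      /* 5 ms acceptable standing delay */
    #define CODEL_INTERVAL_US  100000    /* 100 ms sliding window          */

    struct codel_state {
            bool     dropping;           /* currently in the dropping state */
            uint32_t count;              /* drops since entering the state  */
            uint64_t first_above_time;   /* when sojourn first exceeded target */
            uint64_t drop_next;          /* time of the next scheduled drop */
    };

    /* Should the packet dequeued at 'now' (enqueued at 'enq') be dropped or
     * ECN-marked? Simplified: the real algorithm also re-checks sojourn time
     * between scheduled drops and decays 'count' across dropping episodes.
     */
    static bool codel_should_drop(struct codel_state *st,
                                  uint64_t now, uint64_t enq)
    {
            uint64_t sojourn = now - enq;

            if (sojourn < CODEL_TARGET_US) {
                    st->first_above_time = 0;   /* delay is fine again */
                    st->dropping = false;
                    return false;
            }
            if (!st->first_above_time) {
                    st->first_above_time = now; /* start the grace interval */
                    return false;
            }
            if (!st->dropping &&
                now - st->first_above_time >= CODEL_INTERVAL_US) {
                    st->dropping = true;        /* persistent queue: start dropping */
                    st->count = 1;
                    st->drop_next = now + CODEL_INTERVAL_US;
                    return true;
            }
            if (st->dropping && now >= st->drop_next) {
                    st->count++;                /* drop faster the longer it persists */
                    st->drop_next = now +
                            (uint64_t)(CODEL_INTERVAL_US / sqrt(st->count));
                    return true;
            }
            return false;
    }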
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1341 commits)
drivers/net/stmmac: seq_file fix memory leak
ipv6/exthdrs: strict Pad1 and PadN check
USB: qmi_wwan: Add ZTE (Vodafone) K3520-Z
USB: qmi_wwan: Add ZTE (Vodafone) K3765-Z
USB: qmi_wwan: Make forced int 4 whitelist generic
net/ipv4: replace simple_strtoul with kstrtoul
net/ipv4/ipconfig: neaten __setup placement
net: qmi_wwan: Add Vodafone/Huawei K5005 support
net: cdc_ether: Add ZTE WWAN matches before generic Ethernet
ipv6: use skb coalescing in reassembly
ipv4: use skb coalescing in defragmentation
net: introduce skb_try_coalesce()
net:ipv6:fixed space issues relating to operators.
net:ipv6:fixed a trailing white space issue.
ipv6: disable GSO on sockets hitting dst_allfrag
tg3: use netdev_alloc_frag() API
net: napi_frags_skb() is static
ppp: avoid false drop_monitor false positives
ipv6: bool/const conversions phase2
ipx: Remove spurious NULL checking in ipx_ioctl().
...
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c |  21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c       |  41
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h     |   8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h       |  51
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c     |  88
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h          |  72
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c     | 780
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c         |  12
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c      | 184
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c       |  10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c         | 115
12 files changed, 994 insertions, 390 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index bac9dda31b6..4dd18a1f45d 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -228,9 +228,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) /* FWSM register */ mac->has_fwsm = true; /* ARC supported; valid only if manageability features are enabled. */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; + mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); /* Adaptive IFS not supported */ mac->adaptive_ifs = false; @@ -766,6 +764,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; + u16 kum_reg_data; /* * Prevent the PCI-E bus from sticking if there is no TLP connection @@ -791,6 +790,13 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ew32(CTRL, ctrl | E1000_CTRL_RST); e1000_release_phy_80003es2lan(hw); + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) /* We don't want to continue accessing MAC registers. */ @@ -938,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) else reg |= (1 << 28); ew32(TARC(1), reg); + + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); } /** @@ -1433,6 +1447,7 @@ static const struct e1000_mac_operations es2_mac_ops = { /* setup_physical_interface dependent on media type */ .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations es2_phy_ops = { diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index b3fdc6977f2..36db4df09ae 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -295,9 +295,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) * ARC supported; valid only if manageability features are * enabled. */ - mac->arc_subsystem_valid = - (er32(FWSM) & E1000_FWSM_MODE_MASK) - ? true : false; + mac->arc_subsystem_valid = !!(er32(FWSM) & + E1000_FWSM_MODE_MASK); break; case e1000_82574: case e1000_82583: @@ -798,7 +797,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) /* Check for pending operations. 
*/ for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } @@ -822,7 +821,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) for (i = 0; i < E1000_FLASH_UPDATES; i++) { usleep_range(1000, 2000); - if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + if (!(er32(EECD) & E1000_EECD_FLUPD)) break; } @@ -1000,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext; + u32 ctrl, ctrl_ext, eecd; s32 ret_val; /* @@ -1073,6 +1072,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) */ switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = er32(EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + ew32(EECD, eecd); + break; case e1000_82573: case e1000_82574: case e1000_82583: @@ -1280,6 +1289,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) ew32(CTRL_EXT, reg); } + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type <= e1000_82573) { + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + } + /* PCI-Ex Control Registers */ switch (hw->mac.type) { case e1000_82574: @@ -1763,7 +1782,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) * incoming packets directed to this port are dropped. * Eventually the LAA will be in RAR[0] and RAR[14]. */ - e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); } /** @@ -1927,6 +1947,7 @@ static const struct e1000_mac_operations e82571_mac_ops = { .setup_led = e1000e_setup_led_generic, .config_collision_dist = e1000e_config_collision_dist_generic, .read_mac_addr = e1000_read_mac_addr_82571, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations e82_phy_ops_igp = { @@ -2061,9 +2082,11 @@ const struct e1000_info e1000_82574_info = { | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_CHECK_PHY_HANG + .flags2 = FLAG2_CHECK_PHY_HANG | FLAG2_DISABLE_ASPM_L0S - | FLAG2_NO_DISABLE_RX, + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX + | FLAG2_DMA_BURST, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 3a502591716..351a4097b2b 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -74,7 +74,9 @@ #define E1000_WUS_BC E1000_WUFC_BC /* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ @@ -573,6 +575,7 @@ #define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ /* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ #define NWAY_LPAR_PAUSE 0x0400 /* LP Pause 
operation desired */ #define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ @@ -739,6 +742,7 @@ #define I82577_E_PHY_ID 0x01540050 #define I82578_E_PHY_ID 0x004DD040 #define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ @@ -850,4 +854,8 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640 +/* FW Semaphore */ +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 + #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index b83897f76ee..6e6fffb3458 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -206,6 +206,7 @@ enum e1000_boards { board_ich10lan, board_pchlan, board_pch2lan, + board_pch_lpt, }; struct e1000_ps_page { @@ -528,6 +529,7 @@ extern const struct e1000_info e1000_ich9_info; extern const struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_es2_info; extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, @@ -576,7 +578,7 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); -extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); @@ -673,11 +675,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) return hw->phy.ops.read_reg(hw, offset, data); } +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) { return hw->phy.ops.write_reg(hw, offset, data); } +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + static inline s32 e1000_get_cable_length(struct e1000_hw *hw) { return hw->phy.ops.get_cable_length(hw); @@ -735,9 +747,46 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) return readl(hw->hw_addr + reg); } +#define er32(reg) __er32(hw, E1000_##reg) + +/** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register to have + * an incorrect value. Workaround this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +static inline s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) { + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + writel(val, hw->hw_addr + reg); } +#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) + +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (__ew32((a), (reg + ((offset) << 2)), (value))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + #endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index db35dd5d96d..d863075df7a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -259,8 +259,7 @@ static int e1000_set_settings(struct net_device *netdev, * cannot be changed */ if (hw->phy.ops.check_reset_block(hw)) { - e_err("Cannot change link characteristics when SoL/IDER is " - "active.\n"); + e_err("Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -403,15 +402,15 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[1] = er32(STATUS); regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN); - regs_buff[4] = er32(RDH); - regs_buff[5] = er32(RDT); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); regs_buff[6] = er32(RDTR); regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN); - regs_buff[9] = er32(TDH); - regs_buff[10] = er32(TDT); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); + regs_buff[10] = er32(TDT(0)); regs_buff[11] = er32(TIDV); regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ @@ -727,9 +726,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, (test[pat] & write)); val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); if (val != (test[pat] & write & mask)) { - e_err("pattern test reg %04X failed: got 0x%08X " - "expected 0x%08X\n", reg + offset, val, - (test[pat] & write & mask)); + e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg + offset, val, (test[pat] & write & mask)); *data = reg; return 1; } @@ -744,8 +742,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, __ew32(&adapter->hw, reg, write & mask); val = __er32(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - e_err("set/check reg %04X test failed: got 0x%08X " - "expected 0x%08X\n", reg, (val & mask), (write & mask)); + e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); *data = reg; return 1; } @@ -775,6 +773,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) u32 i; u32 toggle; u32 mask; + u32 wlock_mac = 0; /* * The status register is Read Only, so a write should fail. 
@@ -797,8 +796,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ew32(STATUS, toggle); after = er32(STATUS) & toggle; if (value != after) { - e_err("failed STATUS register test got: 0x%08X expected: " - "0x%08X\n", after, value); + e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -813,15 +812,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) } REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); - REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); - REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); @@ -830,29 +829,41 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); - REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); if (!(adapter->flags & FLAG_IS_ICH)) REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); - REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); mask = 0x8003FFFF; switch (mac->type) { case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: mask |= (1 << 18); break; default: break; } - for (i = 0; i < mac->rar_entry_count; i++) + + if (mac->type == e1000_pch_lpt) + wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> + E1000_FWSM_WLOCK_MAC_SHIFT; + + for (i = 0; i < mac->rar_entry_count; i++) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), - mask, 0xFFFFFFFF); + mask, 0xFFFFFFFF); + } for (i = 0; i < mac->mta_reg_count; i++) REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); *data = 0; + return 0; } @@ -1104,11 +1115,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH, ((u64) tx_ring->dma >> 32)); - ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); - ew32(TDH, 0); - ew32(TDT, 0); + ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); + ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); @@ -1168,11 +1179,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rctl = er32(RCTL); if 
(!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); - ew32(RDBAH, ((u64) rx_ring->dma >> 32)); - ew32(RDLEN, rx_ring->size); - ew32(RDH, 0); - ew32(RDT, 0); + ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64) rx_ring->dma >> 32)); + ew32(RDLEN(0), rx_ring->size); + ew32(RDH(0), 0); + ew32(RDT(0), 0); rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | E1000_RCTL_SBP | E1000_RCTL_SECRC | @@ -1534,7 +1545,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) int ret_val = 0; unsigned long time; - ew32(RDT, rx_ring->count - 1); + ew32(RDT(0), rx_ring->count - 1); /* * Calculate the loop count based on the largest descriptor ring @@ -1561,7 +1572,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) if (k == tx_ring->count) k = 0; } - ew32(TDT, k); + ew32(TDT(0), k); e1e_flush(); msleep(200); time = jiffies; /* set the start time for the receive */ @@ -1791,8 +1802,7 @@ static void e1000_get_wol(struct net_device *netdev, wol->supported &= ~WAKE_UCAST; if (adapter->wol & E1000_WUFC_EX) - e_err("Interface does not support directed (unicast) " - "frame wake-up packets\n"); + e_err("Interface does not support directed (unicast) frame wake-up packets\n"); } if (adapter->wol & E1000_WUFC_EX) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index f82ecf536c8..ed5b40985ed 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -36,16 +36,6 @@ struct e1000_adapter; #include "defines.h" -#define er32(reg) __er32(hw, E1000_##reg) -#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) -#define e1e_flush() er32(STATUS) - -#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ - (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) - -#define E1000_READ_REG_ARRAY(a, reg, offset) \ - (readl((a)->hw_addr + reg + ((offset) << 2))) - enum e1e_registers { E1000_CTRL = 0x00000, /* Device Control - RW */ E1000_STATUS = 0x00008, /* Device Status - RO */ @@ -61,6 +51,7 @@ enum e1e_registers { E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ E1000_FCT = 0x00030, /* Flow Control Type - RW */ E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */ E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ @@ -94,31 +85,40 @@ enum e1e_registers { E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ - E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ - E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ - E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ - E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ - E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ - E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ - E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ -#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) - E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ - -/* Convenience macros +/* + * Convenience macros * * Note: "_n" is the queue number of the register to be written to. 
* * Example usage: - * E1000_RDBAL_REG(current_rx_queue) - * + * E1000_RDBAL(current_rx_queue) */ -#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) + E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */ +#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8)) + E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */ +#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8)) + E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */ +#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8)) + E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */ +#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8)) + E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */ +#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8)) + E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ + E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ +#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) + E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ + E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ - E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ - E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ - E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ - E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ - E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ + E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */ +#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8)) + E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */ +#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8)) + E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */ +#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8)) + E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */ +#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8)) + E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */ +#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8)) E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ #define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) @@ -200,6 +200,14 @@ enum e1e_registers { #define E1000_RA (E1000_RAL(0)) E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ #define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) + E1000_SHRAL_PCH_LPT_BASE = 0x05408, +#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8)) + E1000_SHRAH_PCH_LTP_BASE = 0x0540C, +#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8)) + E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */ +#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8)) + E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */ +#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8)) E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ E1000_WUC = 0x05800, /* Wakeup Control - RW */ E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ @@ -402,6 +410,8 @@ enum e1e_registers { #define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 #define E1000_DEV_ID_PCH2_LV_LM 0x1502 #define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_REVISION_4 4 @@ -422,6 +432,7 @@ enum e1000_mac_type { e1000_ich10lan, e1000_pchlan, e1000_pch2lan, + e1000_pch_lpt, }; enum e1000_media_type { @@ -459,6 +470,7 @@ enum e1000_phy_type { e1000_phy_82578, e1000_phy_82577, e1000_phy_82579, + e1000_phy_i217, }; enum e1000_bus_width { @@ -782,6 +794,7 @@ struct e1000_mac_operations { s32 
(*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); s32 (*read_mac_addr)(struct e1000_hw *); }; @@ -966,6 +979,7 @@ struct e1000_dev_spec_ich8lan { struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; bool nvm_k1_enabled; bool eee_disable; + u16 eee_lp_ability; }; struct e1000_hw { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b461c24945e..bbf70ba367d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -105,6 +105,9 @@ #define E1000_FEXTNVM_SW_CONFIG 1 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 @@ -112,6 +115,8 @@ #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ #define PHY_PAGE_SHIFT 5 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ @@ -127,14 +132,22 @@ #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + /* SMBus Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) #define HV_SMB_ADDR_MASK 0x007F #define HV_SMB_ADDR_PEC_EN 0x0200 #define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) @@ -147,11 +160,26 @@ #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ #define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_MASK 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_MASK 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_MASK 0x0010 /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 /* OEM Bits Phy Register */ #define HV_OEM_BITS PHY_REG(768, 25) @@ -255,6 +283,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static void 
e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); @@ -283,18 +313,161 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) #define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) #define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) -static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { - u32 ctrl; + u16 phy_reg; + u32 phy_id; - ctrl = er32(CTRL); - ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; - ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; - ew32(CTRL, ctrl); - e1e_flush(); - udelay(10); - ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; - ew32(CTRL, ctrl); + e1e_rphy_locked(hw, PHY_ID1, &phy_reg); + phy_id = (u32)(phy_reg << 16); + e1e_rphy_locked(hw, PHY_ID2, &phy_reg); + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + return true; + } else { + if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK)) + hw->phy.id = phy_id; + return true; + } + + return false; +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 mac_reg, fwsm = er32(FWSM); + s32 ret_val; + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to initialize PHY flow\n"); + return ret_val; + } + + /* + * The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* + * Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. 
+ */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* fall-through */ + case e1000_pch2lan: + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + if (e1000_phy_is_accessible_pchlan(hw)) { + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + break; + } + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + e_dbg("Required LANPHYPC toggle blocked by ME\n"); + break; + } + + e_dbg("Toggling LANPHYPC\n"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + udelay(10); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + + /* + * Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; } /** @@ -324,70 +497,41 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - if (!hw->phy.ops.check_reset_block(hw)) { - u32 fwsm = er32(FWSM); - - /* - * The MAC-PHY interconnect may still be in SMBus mode after - * Sx->S0. If resetting the PHY is not blocked, toggle the - * LANPHYPC Value bit to force the interconnect to PCIe mode. - */ - e1000_toggle_lanphypc_value_ich8lan(hw); - msleep(50); - - /* - * Gate automatic PHY configuration by hardware on - * non-managed 82579 - */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) - e1000_gate_hw_phy_config_ich8lan(hw, true); - - /* - * Reset the PHY before any access to it. Doing so, ensures - * that the PHY is in a known good state before we read/write - * PHY registers. The generic reset is sufficient here, - * because we haven't determined the PHY type yet. 
- */ - ret_val = e1000e_phy_hw_reset_generic(hw); - if (ret_val) - return ret_val; + phy->id = e1000_phy_unknown; - /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); - e1000_gate_hw_phy_config_ich8lan(hw, false); - } - } + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; - phy->id = e1000_phy_unknown; - switch (hw->mac.type) { - default: - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - return ret_val; - if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; break; - /* fall-through */ - case e1000_pch2lan: - /* - * In case the PHY needs to be in mdio slow mode, - * set slow mode and try to get the PHY id again. - */ - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (ret_val) - return ret_val; - ret_val = e1000e_get_phy_id(hw); - if (ret_val) - return ret_val; - break; - } + } phy->type = e1000e_get_phy_type_from_id(phy->id); switch (phy->type) { case e1000_phy_82577: case e1000_phy_82579: + case e1000_phy_i217: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; @@ -572,7 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) /* Adaptive IFS supported */ mac->adaptive_ifs = true; - /* LED operations */ + /* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: @@ -591,8 +735,12 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.led_on = e1000_led_on_ich8lan; mac->ops.led_off = e1000_led_off_ich8lan; break; - case e1000_pchlan: case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ @@ -609,12 +757,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } + if (mac->type == e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + } + /* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - /* Gate automatic PHY configuration by hardware on managed 82579 */ - if ((mac->type == e1000_pch2lan) && + /* + * Gate automatic PHY configuration by hardware on managed + * 82579 and i217 + */ + if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, true); @@ -630,22 +786,50 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val = 0; u16 phy_reg; - if (hw->phy.type != e1000_phy_82579) + if ((hw->phy.type != e1000_phy_82579) && + (hw->phy.type != e1000_phy_i217)) return 0; ret_val = 
e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) return ret_val; - if (hw->dev_spec.ich8lan.eee_disable) + if (dev_spec->eee_disable) phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; else phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; - return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + if (ret_val) + return ret_val; + + if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I217_EEE_LP_ABILITY); + if (ret_val) + goto release; + e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); + + /* + * EEE is not supported in 100Half, so ignore partner's EEE + * in 100 ability if full-duplex is not advertised. + */ + e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); + if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS)) + dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED; +release: + hw->phy.ops.release(hw); + } + + return 0; } /** @@ -687,6 +871,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + if (!link) return 0; /* No link detected */ @@ -782,6 +969,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) break; case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -967,6 +1155,145 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) } /** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. + **/ +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + if (index < hw->mac.rar_entry_count) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + ew32(SHRAL(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL(index - 1)) == rar_low) && + (er32(SHRAH(index - 1)) == rar_high)) + return; + + e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), er32(FWSM)); + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. 
For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). + **/ +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + /* + * The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * @@ -994,6 +1321,8 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; s32 ret_val = 0; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; @@ -1006,6 +1335,19 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } @@ -1043,6 +1385,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) /* Fall-thru */ case e1000_pchlan: case e1000_pch2lan: + case e1000_pch_lpt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -1062,10 +1405,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) * extended configuration before SW configuration */ data = er32(EXTCNF_CTRL); - if (!(hw->mac.type == e1000_pch2lan)) { - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto release; - } + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; cnf_size = er32(EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; @@ -1076,9 +1418,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= 
E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && - (hw->mac.type == e1000_pchlan)) || - (hw->mac.type == e1000_pch2lan)) { + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { /* * HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. @@ -1121,8 +1463,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) reg_addr &= PHY_REG_MASK; reg_addr |= phy_page; - ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, - reg_data); + ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); if (ret_val) goto release; } @@ -1159,8 +1500,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { if (hw->phy.type == e1000_phy_82578) { - ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, - &status_reg); + ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, + &status_reg); if (ret_val) goto release; @@ -1175,8 +1516,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) } if (hw->phy.type == e1000_phy_82577) { - ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, - &status_reg); + ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); if (ret_val) goto release; @@ -1191,15 +1531,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) } /* Link stall fix for link up */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x0100); + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); if (ret_val) goto release; } else { /* Link stall fix for link down */ - ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), - 0x4100); + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); if (ret_val) goto release; } @@ -1279,14 +1617,14 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) u32 mac_reg; u16 oem_reg; - if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) + if (hw->mac.type < e1000_pchlan) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - if (!(hw->mac.type == e1000_pch2lan)) { + if (hw->mac.type == e1000_pchlan) { mac_reg = er32(EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; @@ -1298,7 +1636,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) mac_reg = er32(PHY_CTRL); - ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; @@ -1325,7 +1663,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; - ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); @@ -1421,11 +1759,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, - phy_data & 0x00FF); + ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); release: hw->phy.ops.release(hw); @@ -1484,7 +1821,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) u32 mac_reg; u16 
i; - if (hw->mac.type != e1000_pch2lan) + if (hw->mac.type < e1000_pch2lan) return 0; /* disable Rx path while enabling/disabling workaround */ @@ -1657,20 +1994,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_MSE_THRESHOLD); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034); if (ret_val) goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_MSE_LINK_DOWN); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005); release: hw->phy.ops.release(hw); @@ -1708,8 +2043,18 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) return ret_val; if (status_reg & HV_M_STATUS_SPEED_1000) { + u16 pm_phy_reg; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + /* LV 1G Packet drop issue wa */ + ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; + ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); + if (ret_val) + return ret_val; } else { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; @@ -1733,7 +2078,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) { u32 extcnf_ctrl; - if (hw->mac.type != e1000_pch2lan) + if (hw->mac.type < e1000_pch2lan) return; extcnf_ctrl = er32(EXTCNF_CTRL); @@ -1835,12 +2180,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, - I82579_LPI_UPDATE_TIMER); + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); if (!ret_val) - ret_val = hw->phy.ops.write_reg_locked(hw, - I82579_EMI_DATA, - 0x1387); + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387); hw->phy.ops.release(hw); } @@ -2213,7 +2556,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); /* Check if the flash descriptor is valid */ - if (hsfsts.hsf_status.fldesvalid == 0) { + if (!hsfsts.hsf_status.fldesvalid) { e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } @@ -2233,7 +2576,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * completed. */ - if (hsfsts.hsf_status.flcinprog == 0) { + if (!hsfsts.hsf_status.flcinprog) { /* * There is no cycle running at present, * so we can start a cycle. 
@@ -2251,7 +2594,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcinprog == 0) { + if (!hsfsts.hsf_status.flcinprog) { ret_val = 0; break; } @@ -2293,12 +2636,12 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) /* wait till FDONE bit is set to 1 */ do { hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcdone == 1) + if (hsfsts.hsf_status.flcdone) break; udelay(1); } while (i++ < timeout); - if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) return 0; return -E1000_ERR_NVM; @@ -2409,10 +2752,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) { + if (hsfsts.hsf_status.flcerr) { /* Repeat for some time before giving up. */ continue; - } else if (hsfsts.hsf_status.flcdone == 0) { + } else if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } @@ -2642,7 +2985,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - if ((data & 0x40) == 0) { + if (!(data & 0x40)) { data |= 0x40; ret_val = e1000_write_nvm(hw, 0x19, 1, &data); if (ret_val) @@ -2760,10 +3103,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) + if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. */ continue; - if (hsfsts.hsf_status.flcdone == 0) { + if (!hsfsts.hsf_status.flcdone) { e_dbg("Timeout error - flash cycle did not complete.\n"); break; } @@ -2915,10 +3258,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) * a few more times else Done */ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - if (hsfsts.hsf_status.flcerr == 1) + if (hsfsts.hsf_status.flcerr) /* repeat for some time before giving up */ continue; - else if (hsfsts.hsf_status.flcdone == 0) + else if (!hsfsts.hsf_status.flcdone) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } @@ -3060,8 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; - u16 reg; - u32 ctrl, kab; + u16 kum_cfg; + u32 ctrl, reg; s32 ret_val; /* @@ -3095,12 +3438,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) } if (hw->mac.type == e1000_pchlan) { - /* Save the NVM K1 bit setting*/ - ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + /* Save the NVM K1 bit setting */ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); if (ret_val) return ret_val; - if (reg & E1000_NVM_K1_ENABLE) + if (kum_cfg & E1000_NVM_K1_ENABLE) dev_spec->nvm_k1_enabled = true; else dev_spec->nvm_k1_enabled = false; @@ -3130,6 +3473,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) /* cannot issue a flush here because it hangs the hardware */ msleep(20); + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = er32(FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, reg); + } + if (!ret_val) 
clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); @@ -3154,9 +3505,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(IMC, 0xffffffff); er32(ICR); - kab = er32(KABGTXD); - kab |= E1000_KABGTXD_BGSQLBIAS; - ew32(KABGTXD, kab); + reg = er32(KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, reg); return 0; } @@ -3309,6 +3660,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) */ reg = er32(RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* + * Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); } @@ -3359,6 +3717,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) ew32(FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); @@ -3422,6 +3781,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) break; case e1000_phy_82577: case e1000_phy_82579: + case e1000_phy_i217: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; @@ -3668,14 +4028,88 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. + * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I217_EEE_ADVERTISEMENT); + if (ret_val) + goto release; + e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert); + + /* + * Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link. + */ + if ((eee_advert & I217_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I217_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + } + + /* + * For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + + /* Enable proxy to reset only on power good. */ + e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); + + /* + * Set bit enable LPI (EEE) to reset only on + * power good. + */ + e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_MASK; + e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. 
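The suspend-workaround comments above spell out when LPLU may be dropped: 100 Mb/s with EEE draws less power than 10 Mb/s without it, so LPLU is cleared only when both link partners advertise 100M EEE and 100-full is actually being advertised. The same condition as a standalone predicate; the bit masks below are assumed placeholders, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

/* Assumed bit positions, for illustration only. */
#define EEE_100_SUPPORTED (1u << 1)
#define ADV_100_FULL      (1u << 3)

/* True when Low Power Link Up can safely stay disabled for suspend:
 * both ends support 100M EEE and 100-full is advertised. */
static bool can_skip_lplu(uint16_t eee_advert, uint16_t lp_eee_ability,
			  uint16_t autoneg_advertised)
{
	return (eee_advert & EEE_100_SUPPORTED) &&
	       (lp_eee_ability & EEE_100_SUPPORTED) &&
	       (autoneg_advertised & ADV_100_FULL);
}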
*/ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* + * Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: ew32(PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) @@ -3704,44 +4138,61 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w toggle the LANPHYPC value to power cycle * the PHY. + * On i217, setup Intel Rapid Start Technology. **/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { - u16 phy_id1, phy_id2; s32 ret_val; - if ((hw->mac.type != e1000_pch2lan) || - hw->phy.ops.check_reset_block(hw)) + if (hw->mac.type < e1000_pch2lan) return; - ret_val = hw->phy.ops.acquire(hw); + ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) { - e_dbg("Failed to acquire PHY semaphore in resume\n"); + e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); return; } - /* Test access to the PHY registers by reading the ID regs */ - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); - if (ret_val) - goto release; - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); - if (ret_val) - goto release; - - if (hw->phy.id == ((u32)(phy_id1 << 16) | - (u32)(phy_id2 & PHY_REVISION_MASK))) - goto release; + /* + * For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). 
+ */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; - e1000_toggle_lanphypc_value_ich8lan(hw); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } - hw->phy.ops.release(hw); - msleep(50); - e1000_phy_hw_reset(hw); - msleep(50); - return; + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* + * Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_MASK; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: - hw->phy.ops.release(hw); + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } } /** @@ -3921,7 +4372,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) /* If EEPROM is not marked present, init the IGP 3 PHY manually */ if (hw->mac.type <= e1000_ich9lan) { - if (((er32(EECD) & E1000_EECD_PRES) == 0) && + if (!(er32(EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) { e1000e_phy_init_script_igp3(hw); } @@ -3982,6 +4433,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -4026,6 +4478,7 @@ static const struct e1000_mac_operations ich8_mac_ops = { .setup_physical_interface= e1000_setup_copper_link_ich8lan, /* id_led_init dependent on mac type */ .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, }; static const struct e1000_phy_operations ich8_phy_ops = { @@ -4140,3 +4593,22 @@ const struct e1000_info e1000_pch2_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index decad98c105..026e8b3ab52 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -143,12 +143,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) /* Setup the receive address */ e_dbg("Programming MAC Address into RAR[0]\n"); - e1000e_rar_set(hw, hw->mac.addr, 0); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); for (i = 1; i < rar_count; i++) - e1000e_rar_set(hw, mac_addr, i); + hw->mac.ops.rar_set(hw, mac_addr, i); } /** @@ -215,13 +215,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. 
*/ - e1000e_rar_set(hw, alt_mac_addr, 0); + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); return 0; } /** - * e1000e_rar_set - Set receive address register + * e1000e_rar_set_generic - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register @@ -229,7 +229,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) * Sets the receive address array register at index to the address passed * in by addr. **/ -void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; @@ -681,7 +681,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) return ret_val; } - if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) hw->fc.requested_mode = e1000_fc_none; else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 473f8e71151..bacc950fc68 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -85,7 +85,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) /* Check that the host interface is enabled. */ hicr = er32(HICR); - if ((hicr & E1000_HICR_EN) == 0) { + if (!(hicr & E1000_HICR_EN)) { e_dbg("E1000_HOST_EN bit disabled.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 9520a6ac1f3..a4b0435b00d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION +#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -79,6 +79,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_ich10lan] = &e1000_ich10_info, [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, }; struct e1000_reg_info { @@ -110,14 +111,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Rx Registers */ {E1000_RCTL, "RCTL"}, - {E1000_RDLEN, "RDLEN"}, - {E1000_RDH, "RDH"}, - {E1000_RDT, "RDT"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, {E1000_RDTR, "RDTR"}, {E1000_RXDCTL(0), "RXDCTL"}, {E1000_ERT, "ERT"}, - {E1000_RDBAL, "RDBAL"}, - {E1000_RDBAH, "RDBAH"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, {E1000_RDFH, "RDFH"}, {E1000_RDFT, "RDFT"}, {E1000_RDFHS, "RDFHS"}, @@ -126,11 +127,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { /* Tx Registers */ {E1000_TCTL, "TCTL"}, - {E1000_TDBAL, "TDBAL"}, - {E1000_TDBAH, "TDBAH"}, - {E1000_TDLEN, "TDLEN"}, - {E1000_TDH, "TDH"}, - {E1000_TDT, "TDT"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, {E1000_TIDV, "TIDV"}, {E1000_TXDCTL(0), "TXDCTL"}, {E1000_TADV, "TADV"}, @@ -538,43 +539,15 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, adapter->hw_csum_good++; } -/** - * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() - * @hw: pointer to the HW structure - * @tail: address of tail descriptor register - * @i: value to write to tail descriptor register - 
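e1000e_rar_set_generic(), renamed above so it can sit behind the new mac.ops.rar_set pointer, packs a six-byte Ethernet address into a 32-bit register pair (rar_low/rar_high). A standalone sketch of that packing as commonly done in this hardware family; treat the byte ordering and the "address valid" bit position as assumptions for illustration rather than statements about the device.

#include <stdint.h>

#define RAH_AV (1u << 31)   /* assumed "address valid" flag in the high word */

/* First four octets into the low register, last two plus the valid bit
 * into the high register, little-endian within each word. */
static void pack_rar(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
	*rar_low  = (uint32_t)addr[0] |
		    ((uint32_t)addr[1] << 8) |
		    ((uint32_t)addr[2] << 16) |
		    ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8) | RAH_AV;
}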
* - * When updating the tail register, the ME could be accessing Host CSR - * registers at the same time. Normally, this is handled in h/w by an - * arbiter but on some parts there is a bug that acknowledges Host accesses - * later than it should which could result in the descriptor register to - * have an incorrect value. Workaround this by checking the FWSM register - * which has bit 24 set while ME is accessing Host CSR registers, wait - * if it is set and try again a number of times. - **/ -static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail, - unsigned int i) -{ - unsigned int j = 0; - - while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && - (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) - udelay(50); - - writel(i, tail); - - if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) - return E1000_ERR_SWFW_SYNC; - - return 0; -} - static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); - if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { + writel(i, rx_ring->tail); + + if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); e_err("ME firmware caused invalid RDT - resetting\n"); @@ -586,8 +559,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, tx_ring->tail); - if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { + if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); e_err("ME firmware caused invalid TDT - resetting\n"); @@ -1053,7 +1029,8 @@ static void e1000_print_hw_hang(struct work_struct *work) if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { - /* May be block on write-back, flush and detect again + /* + * May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); @@ -1108,6 +1085,10 @@ static void e1000_print_hw_hang(struct work_struct *work) phy_1000t_status, phy_ext_status, pci_status); + + /* Suggest workaround for known h/w issue */ + if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) + e_err("Try turning off Tx pause (flow control) via ethtool\n"); } /** @@ -1645,7 +1626,10 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) adapter->flags2 &= ~FLAG2_IS_DISCARDING; writel(0, rx_ring->head); - writel(0, rx_ring->tail); + if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -2318,7 +2302,10 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_clean = 0; writel(0, tx_ring->head); - writel(0, tx_ring->tail); + if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); } /** @@ -2530,33 +2517,31 @@ err: } /** - * e1000_clean - NAPI Rx polling callback + * e1000e_poll - NAPI Rx polling callback * @napi: struct associated with this polling callback - * @budget: amount of packets driver is allowed to process this poll + * @weight: number of packets driver is allowed to process this poll **/ -static int e1000_clean(struct napi_struct *napi, int budget) 
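The comment removed along with e1000e_update_tail_wa() above describes the underlying problem: the ME can be accessing host CSRs while the driver writes a tail pointer, and the arbiter may acknowledge the host access late enough that the descriptor register ends up stale. Both the removed helper and the new __ew32_prepare()-based paths boil down to the same wait / write / verify pattern, sketched here with stand-in accessors; the poll bound is an assumed value, while the bit-24 FWSM flag and the 50 microsecond delay come from the removed code.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins, illustrative only, not the driver's register helpers. */
uint32_t read_fwsm(void);
void write_tail(uint32_t val);
uint32_t read_tail(void);
void delay_us(unsigned int us);

#define FWSM_PCIM2PCI  (1u << 24)      /* ME is accessing host CSRs */
#define PCIM2PCI_POLLS 2000            /* assumed bound, for illustration */

/* Wait (bounded) for the ME to go idle, write the tail, then confirm the
 * write took effect; a mismatch tells the caller to reset the ring. */
static bool update_tail_checked(uint32_t tail)
{
	unsigned int polls = 0;

	while ((polls++ < PCIM2PCI_POLLS) && (read_fwsm() & FWSM_PCIM2PCI))
		delay_us(50);

	write_tail(tail);

	return read_tail() == tail;
}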
+static int e1000e_poll(struct napi_struct *napi, int weight) { - struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); struct e1000_hw *hw = &adapter->hw; struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 1, work_done = 0; adapter = netdev_priv(poll_dev); - if (adapter->msix_entries && - !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) - goto clean_rx; - - tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); + if (!adapter->msix_entries || + (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); -clean_rx: - adapter->clean_rx(adapter->rx_ring, &work_done, budget); + adapter->clean_rx(adapter->rx_ring, &work_done, weight); if (!tx_cleaned) - work_done = budget; + work_done = weight; - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { + /* If weight not fully consumed, exit the polling mode */ + if (work_done < weight) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); napi_complete(napi); @@ -2800,13 +2785,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); - ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); - ew32(TDBAH, (tdba >> 32)); - ew32(TDLEN, tdlen); - ew32(TDH, 0); - ew32(TDT, 0); - tx_ring->head = adapter->hw.hw_addr + E1000_TDH; - tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDLEN(0), tdlen); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); @@ -2879,8 +2864,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 rctl, rfctl; u32 pages = 0; - /* Workaround Si errata on 82579 - configure jumbo frame flow */ - if (hw->mac.type == e1000_pch2lan) { + /* Workaround Si errata on PCHx - configure jumbo frame flow */ + if (hw->mac.type >= e1000_pch2lan) { s32 ret_val; if (adapter->netdev->mtu > ETH_DATA_LEN) @@ -2955,6 +2940,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* Enable Extended Status in all Receive Descriptors */ rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_EXTEN; + ew32(RFCTL, rfctl); /* * 82571 and greater support packet-split where the protocol @@ -2980,13 +2966,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) if (adapter->rx_ps_pages) { u32 psrctl = 0; - /* - * disable packet split support for IPv6 extension headers, - * because some malformed IPv6 headers can hang the Rx - */ - rfctl |= (E1000_RFCTL_IPV6_EX_DIS | - E1000_RFCTL_NEW_IPV6_EXT_DIS); - /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; @@ -3025,7 +3004,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) */ } - ew32(RFCTL, rfctl); ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ adapter->flags &= ~FLAG_RX_RESTART_NOW; @@ -3110,13 +3088,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; - ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); - ew32(RDBAH, (rdba >> 32)); - ew32(RDLEN, rdlen); - ew32(RDH, 0); - ew32(RDT, 0); - rx_ring->head = adapter->hw.hw_addr + E1000_RDH; - rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; + 
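The e1000_clean() to e1000e_poll() rework earlier in this hunk keeps the usual NAPI contract: process at most "weight" Rx packets, claim the full weight while Tx work is still pending so the core keeps polling, and only complete (re-enabling interrupts) when the poll came in under its allowance. A skeleton of that contract, with the cleanup routines as stand-ins:

/* Stand-ins for the ring-cleanup routines, illustrative only. */
int  clean_tx_ring(void);                    /* nonzero once Tx is fully cleaned */
void clean_rx_ring(int *work_done, int weight);
void complete_and_reenable_irqs(void);

/* Returns how much of the weight was consumed, as NAPI expects. */
static int poll_sketch(int weight)
{
	int work_done = 0;
	int tx_cleaned = clean_tx_ring();

	clean_rx_ring(&work_done, weight);

	if (!tx_cleaned)
		work_done = weight;      /* more Tx work: stay in polling mode */

	if (work_done < weight)
		complete_and_reenable_irqs();

	return work_done;
}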
ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rdlen); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); @@ -3229,7 +3207,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) netdev_for_each_uc_addr(ha, netdev) { if (!rar_entries) break; - e1000e_rar_set(hw, ha->addr, rar_entries--); + hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); count++; } } @@ -3510,6 +3488,7 @@ void e1000e_reset(struct e1000_adapter *adapter) fc->refresh_time = 0x1000; break; case e1000_pch2lan: + case e1000_pch_lpt: fc->high_water = 0x05C20; fc->low_water = 0x05048; fc->pause_time = 0x0650; @@ -4038,6 +4017,7 @@ static int e1000_close(struct net_device *netdev) static int e1000_set_mac(struct net_device *netdev, void *p) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -4046,7 +4026,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); - e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { /* activate the work around */ @@ -4060,9 +4040,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p) * are dropped. Eventually the LAA will be in RAR[0] and * RAR[14] */ - e1000e_rar_set(&adapter->hw, - adapter->hw.mac.addr, - adapter->hw.mac.rar_entry_count - 1); + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); } return 0; @@ -4641,7 +4620,7 @@ link_up: * reset from the other port. Set the appropriate LAA in RAR[0] */ if (e1000e_get_laa_state_82571(hw)) - e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) e1000e_check_82574_phy_workaround(adapter); @@ -5151,6 +5130,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* if count is 0 then mapping error has occurred */ count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); if (count) { + skb_tx_timestamp(skb); + netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
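The netdev_sent_queue() call added in the transmit path above is one half of the kernel's byte-queue-limit accounting: bytes reported at transmit time must be reported back from the Tx clean path (via netdev_completed_queue()) once the hardware has finished with them, otherwise the limit estimator drifts and the stack either stalls or over-queues. skb_tx_timestamp() alongside it is the standard point at which a driver triggers software transmit timestamping. A minimal sketch of the pairing, with the hooks as stand-ins rather than the kernel API:

/* Stand-in hooks, illustrative only; the kernel interface is
 * netdev_sent_queue()/netdev_completed_queue(). */
void bytes_queued(unsigned int bytes);
void bytes_completed(unsigned int pkts, unsigned int bytes);

/* Transmit side: account bytes as descriptors are handed to hardware. */
static void on_xmit(unsigned int len)
{
	bytes_queued(len);
}

/* Clean side: report the same bytes back once the hardware is done. */
static void on_tx_clean(unsigned int pkts, unsigned int bytes)
{
	bytes_completed(pkts, bytes);
}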
*/ @@ -5285,22 +5266,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - /* Jumbo frame workaround on 82579 requires CRC be stripped */ - if ((adapter->hw.mac.type == e1000_pch2lan) && + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((adapter->hw.mac.type >= e1000_pch2lan) && !(adapter->flags2 & FLAG2_CRC_STRIPPING) && (new_mtu > ETH_DATA_LEN)) { - e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); + e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); return -EINVAL; } - /* 82573 Errata 17 */ - if (((adapter->hw.mac.type == e1000_82573) || - (adapter->hw.mac.type == e1000_82574)) && - (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { - adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; - e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); - } - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ @@ -5694,7 +5667,7 @@ static int __e1000_resume(struct pci_dev *pdev) return err; } - if (hw->mac.type == e1000_pch2lan) + if (hw->mac.type >= e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); e1000e_power_up_phy(adapter); @@ -6226,7 +6199,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; @@ -6593,6 +6566,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 16adeb9418a..55cc1565bc2 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -166,8 +166,8 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea * * Default Value: 1 (enabled) */ -E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ - "the CRC"); +E1000_PARAM(CrcStripping, + "Enable CRC Stripping, disable if your BMC needs the CRC"); struct e1000_option { enum { enable_option, range_option, list_option } type; @@ -347,8 +347,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) /* * Make sure a message is printed for non-special - * values. And in case of an invalid option, display - * warning, use default and got through itr/itr_setting + * values. 
And in case of an invalid option, display + * warning, use default and go through itr/itr_setting * adjustment logic below */ if ((adapter->itr > 4) && @@ -365,7 +365,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) * Make sure a message is printed for non-special * default values */ - if (adapter->itr > 40) + if (adapter->itr > 4) e_info("%s set to default %d\n", opt.name, adapter->itr); } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 35b45578c60..0334d013bc3 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -639,6 +639,45 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) } /** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return e1e_wphy(hw, PHY_1000T_CTRL, phy_data); +} + +/** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * @@ -659,7 +698,11 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - return e1e_wphy(hw, I82577_CFG_REG, phy_data); + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); } /** @@ -718,12 +761,28 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if (phy->disable_polarity_correction == 1) + if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift on BM (disabled by default) */ - if (phy->type == e1000_phy_bm) + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) @@ -879,31 +938,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) return ret_val; } - ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); - if (ret_val) - return ret_val; - - /* load defaults for future use */ - phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? - ((data & CR_1000T_MS_VALUE) ? 
- e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy->ms_type) { - case e1000_ms_force_master: - data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - data |= CR_1000T_MS_ENABLE; - data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + ret_val = e1000_set_master_slave_mode(hw); } return ret_val; @@ -1090,7 +1125,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) * If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ - if (phy->autoneg_advertised == 0) + if (!phy->autoneg_advertised) phy->autoneg_advertised = phy->autoneg_mask; e_dbg("Reconfiguring auto-neg advertisement params\n"); @@ -1596,7 +1631,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) - phy->speed_downgraded = (phy_data & mask); + phy->speed_downgraded = !!(phy_data & mask); return ret_val; } @@ -1925,8 +1960,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->polarity_correction = (phy_data & - M88E1000_PSCR_POLARITY_REVERSAL); + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) @@ -1936,7 +1971,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = e1000_get_cable_length(hw); @@ -1999,7 +2034,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { @@ -2052,8 +2087,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); if (ret_val) return ret_val; - phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) - ? false : true; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); if (phy->polarity_correction) { ret_val = e1000_check_polarity_ife(hw); @@ -2070,7 +2104,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); /* The following parameters are undefined for 10/100 operation. */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; @@ -2320,6 +2354,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; default: phy_type = e1000_phy_unknown; break; @@ -2979,7 +3016,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && - ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { + !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, @@ -3265,7 +3302,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? 
true : false; + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) {
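Many conversions in this patch, including the one just above, replace "flag = (data & MASK)" with "flag = !!(data & MASK)". The double negation collapses any non-zero masked value to 1, which matters whenever the destination is narrower than the masked bit (and reads more clearly even when the destination is a bool). A tiny standalone illustration with an example mask; the constant name is made up for the demo.

#include <stdint.h>
#include <stdio.h>

#define STATUS_MDIX (1u << 11)          /* example mask above bit 7 */

int main(void)
{
	uint16_t data = STATUS_MDIX;
	uint8_t truncated  = (uint8_t)(data & STATUS_MDIX);  /* 0: bit 11 is lost */
	uint8_t normalized = !!(data & STATUS_MDIX);          /* 1 */

	printf("truncated=%u normalized=%u\n", truncated, normalized);
	return 0;
}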