Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/ne3210.c | 2
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 11
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 54
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 56
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 7
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 7
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r--  drivers/net/ethernet/fealnx.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 27
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_phyp.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 14
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 202
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 1
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 550
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 9
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 1
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 3
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 1
67 files changed, 785 insertions(+), 491 deletions(-)
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index a2f8b2b8e27..e3f57427d5c 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -81,7 +81,7 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"};
static int ifmap_val[] __initdata = {
IF_PORT_10BASET,
IF_PORT_UNKNOWN,
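Note: the ne3210, starfire, atl1c, atl2, eeprom.c, winbond-840, sundance and fealnx hunks all apply the same rule: data that is both const and only used during initialization belongs in __initconst (.init.rodata), not __initdata (.init.data), and making the pointer elements "* const" keeps the whole table read-only; mixing const objects with __initdata can also trigger section type conflicts with some toolchains. A minimal sketch of the rule, with made-up names (not taken from any of these drivers):

#include <linux/init.h>
#include <linux/errno.h>

/* Fully-const table: .init.rodata via __initconst, pointers also const. */
static const char * const ifnames[] __initconst = { "UTP", "?", "BNC", "AUI" };

/* Mutable init-only data keeps __initdata (.init.data). */
static int ifmap_val[] __initdata = { 0, 1, 2, 3 };

static int __init example_init(void)
{
	/* both tables are only referenced while the driver initialises */
	return (ifmap_val[0] == 0 && ifnames[0] != NULL) ? 0 : -EINVAL;
}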
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d920a529ba2..5b65992c2a0 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
static const struct chip_info {
const char *name;
int drv_flags;
-} netdrv_tbl[] __devinitdata = {
+} netdrv_tbl[] __devinitconst = {
{ "Adaptec Starfire 6915", CanHaveMII },
};
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 64d0d9c1afa..3491d4312fc 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1845,6 +1845,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
printk(KERN_ERR "amd8111e: No Power Management capability, "
"exiting.\n");
+ err = -ENODEV;
goto err_free_reg;
}
@@ -1852,6 +1853,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
printk(KERN_ERR "amd8111e: DMA not supported,"
"exiting.\n");
+ err = -ENODEV;
goto err_free_reg;
}
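Note: this hunk (and the au1000_eth and dmfe hunks that follow) fixes the same bug class: an error path jumps to the cleanup label without setting err, so the probe routine can return a stale value (often 0, i.e. "success") even though setup failed. A hedged sketch of the pattern; example_probe() and its labels are hypothetical:

#include <linux/pci.h>
#include <linux/slab.h>

static int example_probe(struct pci_dev *pdev)
{
	void *buf;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	buf = kmalloc(256, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;		/* set the code *before* jumping */
		goto err_disable;
	}

	if (!pci_find_capability(pdev, PCI_CAP_ID_PM)) {
		err = -ENODEV;		/* previously missing on paths like this */
		goto err_free;
	}

	return 0;

err_free:
	kfree(buf);
err_disable:
	pci_disable_device(pdev);
	return err;
}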
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 397596b078d..f195acfa2df 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1174,8 +1174,10 @@ static int __devinit au1000_probe(struct platform_device *pdev)
snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, aup->mac_id);
aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (aup->mii_bus->irq == NULL)
+ if (aup->mii_bus->irq == NULL) {
+ err = -ENOMEM;
goto err_out;
+ }
for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
@@ -1190,7 +1192,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto err_mdiobus_reg;
}
- if (au1000_mii_probe(dev) != 0)
+ err = au1000_mii_probe(dev);
+ if (err != 0)
goto err_out;
pDBfree = NULL;
@@ -1205,6 +1208,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
aup->pDBfree = pDBfree;
+ err = -ENODEV;
for (i = 0; i < NUM_RX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
if (!pDB)
@@ -1213,6 +1217,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->rx_db_inuse[i] = pDB;
}
+
+ err = -ENODEV;
for (i = 0; i < NUM_TX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
if (!pDB)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 55a2e379505..d19f82f7597 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -702,7 +702,7 @@ struct atl1c_platform_patch {
u32 patch_flag;
#define ATL1C_LINK_PATCH 0x1
};
-static const struct atl1c_platform_patch plats[] __devinitdata = {
+static const struct atl1c_platform_patch plats[] __devinitconst = {
{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 57d64b80fd7..623dd8635c4 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
*/
#define ATL2_PARAM(X, desc) \
- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
MODULE_PARM_DESC(X, desc);
#else
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0e3048b788c..133d5857b9e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -10,6 +10,7 @@
#include <bcm63xx_regs.h>
#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>
+#include <bcm63xx_iudma.h>
/* default number of descriptor */
#define BCMENET_DEF_RX_DESC 64
@@ -31,35 +32,6 @@
#define BCMENET_MAX_MTU 2046
/*
- * rx/tx dma descriptor
- */
-struct bcm_enet_desc {
- u32 len_stat;
- u32 address;
-};
-
-#define DMADESC_LENGTH_SHIFT 16
-#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
-#define DMADESC_OWNER_MASK (1 << 15)
-#define DMADESC_EOP_MASK (1 << 14)
-#define DMADESC_SOP_MASK (1 << 13)
-#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
-#define DMADESC_WRAP_MASK (1 << 12)
-
-#define DMADESC_UNDER_MASK (1 << 9)
-#define DMADESC_APPEND_CRC (1 << 8)
-#define DMADESC_OVSIZE_MASK (1 << 4)
-#define DMADESC_RXER_MASK (1 << 2)
-#define DMADESC_CRC_MASK (1 << 1)
-#define DMADESC_OV_MASK (1 << 0)
-#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
- DMADESC_OVSIZE_MASK | \
- DMADESC_RXER_MASK | \
- DMADESC_CRC_MASK | \
- DMADESC_OV_MASK)
-
-
-/*
* MIB Counters register definitions
*/
#define ETH_MIB_TX_GD_OCTETS 0
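Note: bcm63xx_enet.h drops its private copy of the IUDMA descriptor layout and pulls it in from <bcm63xx_iudma.h>, so other users of the same DMA engine can share one definition (the header presumably lives with the other mach-bcm63xx headers). The sketch below simply mirrors the definitions removed here; the authoritative copy is in bcm63xx_iudma.h:

/* rx/tx dma descriptor, as shared by the IUDMA users */
struct bcm_enet_desc {
	u32 len_stat;
	u32 address;
};

#define DMADESC_LENGTH_SHIFT	16
#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
#define DMADESC_OWNER_MASK	(1 << 15)
#define DMADESC_EOP_MASK	(1 << 14)
#define DMADESC_SOP_MASK	(1 << 13)
#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK	(1 << 12)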
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 30f04a38922..4833b6a9031 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2957,9 +2957,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_shinfo(skb)->nr_frags +
BDS_PER_TX_PKT +
NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
- bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
- netif_tx_stop_queue(txq);
- BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+ /* Handle special storage cases separately */
+ if (txdata->tx_ring_size != 0) {
+ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
+ }
+
return NETDEV_TX_BUSY;
}
@@ -3523,15 +3527,18 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
} else
#endif
if (!bp->rx_ring_size) {
- u32 cfg = SHMEM_RD(bp,
- dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
-
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
- /* Dercease ring size for 1G functions */
- if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
- PORT_HW_CFG_NET_SERDES_IF_SGMII)
- rx_ring_size /= 10;
+ if (CHIP_IS_E3(bp)) {
+ u32 cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[BP_PORT(bp)].
+ default_cfg);
+
+ /* Decrease ring size for 1G functions */
+ if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SGMII)
+ rx_ring_size /= 10;
+ }
/* allocate at least number of buffers required by FW */
rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f7ed122f407..d5648fc666b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3052,9 +3052,8 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
- /* leave last char as NULL */
- memcpy(ether_stat->version, DRV_MODULE_VERSION,
- ETH_STAT_INFO_VERSION_LEN - 1);
+ strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN);
bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
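Note: the bnx2x_main.c hunk swaps a hand-rolled "copy length-1 bytes and rely on pre-zeroed memory for the terminator" for strlcpy(), which always NUL-terminates the destination within the given size. A small hedged sketch (VERSION_LEN stands in for ETH_STAT_INFO_VERSION_LEN):

#include <linux/string.h>

#define VERSION_LEN 16	/* stand-in for ETH_STAT_INFO_VERSION_LEN */

static void fill_version(char *dst, const char *drv_version)
{
	/* Fragile: memcpy(dst, drv_version, VERSION_LEN - 1) leaves the
	 * terminator to whoever zeroed dst beforehand. */

	/* strlcpy() copies at most VERSION_LEN - 1 bytes and always
	 * writes a trailing '\0'. */
	strlcpy(dst, drv_version, VERSION_LEN);
}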
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 71971a161bd..614981c0226 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -126,7 +126,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
/* Check if this request is ok */
rc = o->validate(bp, o->owner, elem);
if (rc) {
- BNX2X_ERR("Preamble failed: %d\n", rc);
+ DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
goto free_and_exit;
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 46280ba4c5d..a8800ac10df 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -782,7 +782,8 @@ static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
return i == timeout_us / 10;
}
-int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
+static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
+ u32 len)
{
int err;
u32 i, bufoff, msgoff, maxlen, apedata;
@@ -7763,7 +7764,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
- u16 *prodptr = 0;
+ u16 *prodptr = NULL;
/*
* When RSS is enabled, the status block format changes
@@ -8103,11 +8104,11 @@ static int tg3_chip_reset(struct tg3 *tp)
u16 val16;
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
- int i;
+ int j;
u32 cfg_val;
/* Wait for link training to complete. */
- for (i = 0; i < 5000; i++)
+ for (j = 0; j < 5000; j++)
udelay(100);
pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
@@ -10206,7 +10207,7 @@ static u32 tg3_irq_count(struct tg3 *tp)
static bool tg3_enable_msix(struct tg3 *tp)
{
int i, rc;
- struct msix_entry msix_ent[tp->irq_max];
+ struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
tp->txq_cnt = tp->txq_req;
tp->rxq_cnt = tp->rxq_req;
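Note: tg3_enable_msix() previously declared msix_ent[tp->irq_max], a variable-length array whose size is only known at run time; sizing it with the compile-time maximum TG3_IRQ_MAX_VECS bounds the stack usage and avoids the VLA. A hedged sketch of the idiom, with hypothetical names:

#include <linux/pci.h>

#define EXAMPLE_IRQ_MAX_VECS 5	/* stand-in for TG3_IRQ_MAX_VECS */

static int example_enable_msix(struct pci_dev *pdev, int nvecs)
{
	/* fixed upper bound instead of a VLA such as msix_ent[nvecs] */
	struct msix_entry msix_ent[EXAMPLE_IRQ_MAX_VECS];
	int i;

	if (nvecs > EXAMPLE_IRQ_MAX_VECS)
		nvecs = EXAMPLE_IRQ_MAX_VECS;

	for (i = 0; i < nvecs; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	return pci_enable_msix(pdev, msix_ent, nvecs);
}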
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab..16814b34d4b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
unsigned int tx_tail;
void __iomem *base;
- struct sk_buff_head rx_recycle;
unsigned int dma_buf_sz;
dma_addr_t dma_rx_phy;
dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
if (unlikely(skb == NULL))
break;
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
desc_get_buf_len(p), DMA_TO_DEVICE);
}
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- DMA_RX_RING_SZ) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
dev->dev_addr);
}
- skb_queue_head_init(&priv->rx_recycle);
memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
napi_disable(&priv->napi);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
- skb_queue_purge(&priv->rx_recycle);
/* Disable the MAC core */
xgmac_mac_disable(priv->base);
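Note: xgmac (like the gianfar, ucc_geth and mv643xx_eth hunks further down) drops its per-driver rx_recycle queue: with the skb recycling helpers gone from the core, TX completion simply frees the skb and the RX refill path always allocates a fresh one. A minimal sketch of the simplified pattern, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical refill helper: no recycle list, just allocate fresh. */
static struct sk_buff *example_rx_refill(struct net_device *dev,
					 unsigned int buf_sz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_sz);

	if (unlikely(!skb))
		return NULL;	/* caller retries on the next refill pass */
	return skb;
}

/* TX completion frees the skb instead of queueing it for reuse. */
static void example_tx_complete(struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}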
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 745a1f53361..a4da893ac1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -43,6 +43,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#include "t4_hw.h"
@@ -695,6 +696,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6b9f6bb2f7e..604f4f87f55 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -443,7 +443,10 @@ int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
-int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
+/*
+ * usecs to sleep while draining the dbfifo
+ */
+static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
"usecs to sleep while draining the dbfifo");
@@ -636,7 +639,7 @@ static void name_msix_vecs(struct adapter *adap)
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
@@ -644,56 +647,60 @@ static int request_msix_queue_irqs(struct adapter *adap)
return err;
for_each_ethrxq(s, ethqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->ethrxq[ethqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
for_each_ofldrxq(s, ofldqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->ofldrxq[ofldqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
- adap->msix_info[msi].desc,
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
&s->rdmarxq[rdmaqidx].rspq);
if (err)
goto unwind;
- msi++;
+ msi_index++;
}
return 0;
unwind:
while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi].vec,
+ free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
while (--ofldqidx >= 0)
- free_irq(adap->msix_info[--msi].vec,
+ free_irq(adap->msix_info[--msi_index].vec,
&s->ofldrxq[ofldqidx].rspq);
while (--ethqidx >= 0)
- free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->ethrxq[ethqidx].rspq);
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
- int i, msi = 2;
+ int i, msi_index = 2;
struct sge *s = &adap->sge;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+ free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
@@ -2535,9 +2542,8 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
if (!ret) {
- indices = be64_to_cpu(indices);
- *cidx = (indices >> 25) & 0xffff;
- *pidx = (indices >> 9) & 0xffff;
+ *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
+ *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
}
return ret;
}
@@ -3634,10 +3640,10 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
* field selections will fit in the 36-bit budget.
*/
if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
- int i, bits = 0;
+ int j, bits = 0;
- for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
- switch (tp_vlan_pri_map & (1 << i)) {
+ for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
+ switch (tp_vlan_pri_map & (1 << j)) {
case 0:
/* compressed filter field not enabled */
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 35b81d8b59e..32eec15fe4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -380,9 +380,11 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
if (dir)
- *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+ *data++ = (__force __be32) t4_read_reg(adap,
+ (MEMWIN0_BASE + i));
else
- t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+ t4_write_reg(adap, (MEMWIN0_BASE + i),
+ (__force u32) *data++);
}
return 0;
@@ -408,7 +410,8 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
__be32 *buf, int dir)
{
u32 pos, start, end, offset, memoffset;
- int ret;
+ int ret = 0;
+ __be32 *data;
/*
* Argument sanity checks ...
@@ -416,6 +419,10 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if ((addr & 0x3) || (len & 0x3))
return -EINVAL;
+ data = vmalloc(MEMWIN0_APERTURE);
+ if (!data)
+ return -ENOMEM;
+
/*
* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
@@ -438,7 +445,6 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
offset = (addr - start)/sizeof(__be32);
for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
- __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
/*
* If we're writing, copy the data from the caller's memory
@@ -452,7 +458,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if (offset || len < MEMWIN0_APERTURE) {
ret = t4_mem_win_rw(adap, pos, data, 1);
if (ret)
- return ret;
+ break;
}
while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
len > 0) {
@@ -466,7 +472,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
*/
ret = t4_mem_win_rw(adap, pos, data, dir);
if (ret)
- return ret;
+ break;
/*
* If we're reading, copy the data into the caller's memory
@@ -480,7 +486,8 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
}
}
- return 0;
+ vfree(data);
+ return ret;
}
int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
@@ -519,16 +526,21 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
u32 cclk_param, cclk_val;
int i, ret;
int ec, sn;
- u8 vpd[VPD_LEN], csum;
+ u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
+ vpd = vmalloc(VPD_LEN);
+ if (!vpd)
+ return -ENOMEM;
+
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
if (ret < 0)
- return ret;
+ goto out;
if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
dev_err(adapter->pdev_dev, "missing VPD ID string\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
id_len = pci_vpd_lrdt_size(vpd);
@@ -538,21 +550,24 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
dev_err(adapter->pdev_dev, "missing VPD-R section\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
if (vpdr_len + kw_offset > VPD_LEN) {
dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
#define FIND_VPD_KW(var, name) do { \
var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
if (var < 0) { \
dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
- return -EINVAL; \
+ ret = -EINVAL; \
+ goto out; \
} \
var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)
@@ -564,7 +579,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (csum) {
dev_err(adapter->pdev_dev,
"corrupted VPD EEPROM, actual csum %u\n", csum);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
FIND_VPD_KW(ec, "EC");
@@ -587,6 +603,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
ret = t4_query_params(adapter, adapter->mbox, 0, 0,
1, &cclk_param, &cclk_val);
+
+out:
+ vfree(vpd);
if (ret)
return ret;
p->cclk = cclk_val;
@@ -727,7 +746,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
if (ret)
return ret;
if (byte_oriented)
- *data = htonl(*data);
+ *data = (__force __u32) (htonl(*data));
}
return 0;
}
@@ -975,7 +994,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
int ret, addr;
unsigned int i;
u8 first_page[SF_PAGE_SIZE];
- const u32 *p = (const u32 *)fw_data;
+ const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_img_start = adap->params.sf_fw_start;
@@ -2298,7 +2317,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
- *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
+ *data++ = (__force __be32) t4_read_reg(adap,
+ (MEMWIN0_BASE + off + i));
return 0;
}
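Note: two t4_hw.c hunks replace large automatic arrays (the MEMWIN0_APERTURE-sized data[] and the VPD_LEN-sized vpd[]) with vmalloc() allocations to keep kernel stack usage small, turning every early return into a goto so the buffer is freed on all paths. A hedged sketch of the shape; names and sizes are illustrative only:

#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_BUF_LEN 2048	/* stand-in for VPD_LEN / aperture size */

/* stand-in for the real hardware/VPD read */
static int example_hw_read(u8 *buf, size_t len)
{
	memset(buf, 0x82, len);
	return 0;
}

static int example_read(void)
{
	u8 *buf;
	int ret;

	buf = vmalloc(EXAMPLE_BUF_LEN);	/* was: u8 buf[EXAMPLE_BUF_LEN]; */
	if (!buf)
		return -ENOMEM;

	ret = example_hw_read(buf, EXAMPLE_BUF_LEN);
	if (ret < 0)
		goto out;		/* was: return ret; */

	if (buf[0] != 0x82) {
		ret = -EINVAL;
		goto out;		/* was: return -EINVAL; */
	}
	ret = 0;

out:
	vfree(buf);			/* single cleanup point */
	return ret;
}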
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 61cc0934286..77335853ac3 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -661,9 +661,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
new frame, not around filling de->setup_frame. This is non-deterministic
when re-entered but still correct. */
-#undef set_bit_le
-#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
-
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
@@ -673,12 +670,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
- set_bit_le(255, hash_table); /* Broadcast entry */
+ __set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
netdev_for_each_mc_addr(ha, dev) {
int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
- set_bit_le(index, hash_table);
+ __set_bit_le(index, hash_table);
}
for (i = 0; i < 32; i++) {
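Note: de2104x.c and tulip_core.c delete a local set_bit_le() macro that open-coded the byte arithmetic and use the generic __set_bit_le() helper instead; the hash index still comes from the little-endian CRC of the MAC address. A small hedged sketch of the multicast hash setup these hunks perform:

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

/* Build a 512-bit multicast hash table the way these tulip hunks do. */
static void example_build_hash(u16 hash_table[32], const u8 *mc_addr)
{
	int index;

	memset(hash_table, 0, 32 * sizeof(u16));
	__set_bit_le(255, hash_table);			/* broadcast entry */

	index = ether_crc_le(ETH_ALEN, mc_addr) & 0x1ff;
	__set_bit_le(index, hash_table);
}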
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 4d6fe604fa6..d23755ea9bc 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -446,13 +446,17 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
- if (!db->desc_pool_ptr)
+ if (!db->desc_pool_ptr) {
+ err = -ENOMEM;
goto err_out_res;
+ }
db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
- if (!db->buf_pool_ptr)
+ if (!db->buf_pool_ptr) {
+ err = -ENOMEM;
goto err_out_free_desc;
+ }
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -462,8 +466,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
db->chip_id = ent->driver_data;
/* IO type range. */
db->ioaddr = pci_iomap(pdev, 0, 0);
- if (!db->ioaddr)
+ if (!db->ioaddr) {
+ err = -ENOMEM;
goto err_out_free_buf;
+ }
db->chip_revision = pdev->revision;
db->wol_mode = 0;
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ed7d1dcd956..44f7e8e82d8 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
{NULL}};
-static const char *block_name[] __devinitdata = {
+static const char *const block_name[] __devinitconst = {
"21140 non-MII",
"21140 MII PHY",
"21142 Serial PHY",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c4f37aca226..885700a1997 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1010,9 +1010,6 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
new frame, not around filling tp->setup_frame. This is non-deterministic
when re-entered but still correct. */
-#undef set_bit_le
-#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
-
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1022,12 +1019,12 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
- set_bit_le(255, hash_table); /* Broadcast entry */
+ __set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
netdev_for_each_mc_addr(ha, dev) {
int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
- set_bit_le(index, hash_table);
+ __set_bit_le(index, hash_table);
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d1ffca83c8..7c1ec4d7920 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -236,7 +236,7 @@ struct pci_id_info {
int drv_flags; /* Driver use, intended as capability flags. */
};
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
{ /* Sometime a Level-One switch card. */
"Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
{ "Winbond W89c840", CanHaveMII | HasBrokenTx},
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d7bb52a7bda..3b83588e51f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -218,7 +218,7 @@ enum {
struct pci_id_info {
const char *name;
};
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
{"D-Link DFE-550TX FAST Ethernet Adapter"},
{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
{"D-Link DFE-580TX 4 port Server Adapter"},
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb3f2cb3b93..d1b6cc58763 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2129,8 +2129,11 @@ void be_detect_error(struct be_adapter *adapter)
ue_hi = (ue_hi & ~ue_hi_mask);
}
- if (ue_lo || ue_hi ||
- sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * Allow the h/w to stop working completely in case of a real UE.
+ * Hence not setting the hw_error for UE detection.
+ */
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->hw_error = true;
dev_err(&adapter->pdev->dev,
"Error detected in the card\n");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 9d71c9cc300..0e4a0ac86aa 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -150,7 +150,7 @@ struct chip_info {
int flags;
};
-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
{ "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec3b93..1d03dcdd5e5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
- skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
enable_napi(priv);
- skb_queue_head_init(&priv->rx_recycle);
-
/* Initialize a bunch of registers */
init_registers(dev);
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bytes_sent += skb->len;
- /* If there's room in the queue (limit it to rx_buffer_size)
- * we add this skb back into the pool, if it's the right size
- */
- if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
- gfar_align_skb(skb);
- skb_queue_head(&priv->rx_recycle, skb);
- } else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
-
- skb = skb_dequeue(&priv->rx_recycle);
- if (!skb)
- skb = gfar_alloc_skb(dev);
-
- return skb;
+ return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- skb_queue_head(&priv->rx_recycle, skb);
+ dev_kfree_skb(skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4141ef2ddaf..22eabc13ca9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1080,8 +1080,6 @@ struct gfar_private {
u32 cur_filer_idx;
- struct sk_buff_head rx_recycle;
-
/* RX queue filer rule set*/
struct ethtool_rx_list rx_list;
struct mutex rx_queue_access;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 16428843922..0a70bb55d1b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
u8 __iomem *bd)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
- skb = __skb_dequeue(&ugeth->rx_recycle);
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
if (!skb)
- skb = netdev_alloc_skb(ugeth->ndev,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT);
- if (skb == NULL)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
iounmap(ugeth->ug_regs);
ugeth->ug_regs = NULL;
}
-
- skb_queue_purge(&ugeth->rx_recycle);
}
static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- skb_queue_head_init(&ugeth->rx_recycle);
-
return 0;
}
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
if (netif_msg_rx_err(ugeth))
ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
__func__, __LINE__, (u32) skb);
- if (skb) {
- skb->data = skb->head + NET_SKB_PAD;
- skb->len = 0;
- skb_reset_tail_pointer(skb);
- __skb_queue_head(&ugeth->rx_recycle, skb);
- }
+ dev_kfree_skb(skb);
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
- if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
- skb_recycle_check(skb,
- ugeth->ug_info->uf_info.max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT))
- __skb_queue_head(&ugeth->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index f71b3e7b12d..75f337163ce 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
/* index of the first skb which hasn't been transmitted yet. */
u16 skb_dirtytx[NUM_TX_QUEUES];
- struct sk_buff_head rx_recycle;
-
struct ugeth_mii_info *mii_info;
struct phy_device *phydev;
phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index b8e46cc31e5..6be7b9839f3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -35,7 +35,6 @@
#include <linux/if_vlan.h>
#include <asm/ibmebus.h>
-#include <asm/abs_addr.h>
#include <asm/io.h>
#define DRV_NAME "ehea"
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 30f903332e9..d3a130ccdcc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -141,7 +141,7 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
qp_category, /* R5 */
qp_handle, /* R6 */
sel_mask, /* R7 */
- virt_to_abs(cb_addr), /* R8 */
+ __pa(cb_addr), /* R8 */
0, 0);
}
@@ -415,7 +415,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
(u64) cat, /* R5 */
qp_handle, /* R6 */
sel_mask, /* R7 */
- virt_to_abs(cb_addr), /* R8 */
+ __pa(cb_addr), /* R8 */
0, 0, 0, 0); /* R9-R12 */
*inv_attr_id = outs[0];
@@ -528,7 +528,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
u64 hret, cb_logaddr;
- cb_logaddr = virt_to_abs(cb_addr);
+ cb_logaddr = __pa(cb_addr);
hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
adapter_handle, /* R4 */
@@ -545,7 +545,7 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
void *cb_addr)
{
u64 port_info;
- u64 cb_logaddr = virt_to_abs(cb_addr);
+ u64 cb_logaddr = __pa(cb_addr);
u64 arr_index = 0;
port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
@@ -567,7 +567,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 port_info;
u64 arr_index = 0;
- u64 cb_logaddr = virt_to_abs(cb_addr);
+ u64 cb_logaddr = __pa(cb_addr);
port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
| EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
@@ -621,6 +621,6 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
return ehea_plpar_hcall_norets(H_ERROR_DATA,
adapter_handle, /* R4 */
ressource_handle, /* R5 */
- virt_to_abs(rblock), /* R6 */
+ __pa(rblock), /* R6 */
0, 0, 0, 0); /* R7-R12 */
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index cb66f574dc9..27f881758d1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -163,7 +163,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
goto out_kill_hwq;
}
- rpage = virt_to_abs(vpage);
+ rpage = __pa(vpage);
hret = ehea_h_register_rpage(adapter->handle,
0, EHEA_CQ_REGISTER_ORIG,
cq->fw_handle, rpage, 1);
@@ -290,7 +290,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
goto out_kill_hwq;
}
- rpage = virt_to_abs(vpage);
+ rpage = __pa(vpage);
hret = ehea_h_register_rpage(adapter->handle, 0,
EHEA_EQ_REGISTER_ORIG,
@@ -395,7 +395,7 @@ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
pr_err("hw_qpageit_get_inc failed\n");
goto out_kill_hwq;
}
- rpage = virt_to_abs(vpage);
+ rpage = __pa(vpage);
hret = ehea_h_register_rpage(adapter->handle,
0, h_call_q_selector,
qp->fw_handle, rpage, 1);
@@ -790,7 +790,7 @@ u64 ehea_map_vaddr(void *caddr)
if (!ehea_bmap)
return EHEA_INVAL_ADDR;
- index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+ index = __pa(caddr) >> SECTION_SIZE_BITS;
top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
if (!ehea_bmap->top[top])
return EHEA_INVAL_ADDR;
@@ -812,7 +812,7 @@ static inline void *ehea_calc_sectbase(int top, int dir, int idx)
unsigned long ret = idx;
ret |= dir << EHEA_DIR_INDEX_SHIFT;
ret |= top << EHEA_TOP_INDEX_SHIFT;
- return abs_to_virt(ret << SECTION_SIZE_BITS);
+ return __va(ret << SECTION_SIZE_BITS);
}
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
@@ -822,7 +822,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
void *pg;
u64 j, m, hret;
unsigned long k = 0;
- u64 pt_abs = virt_to_abs(pt);
+ u64 pt_abs = __pa(pt);
void *sectbase = ehea_calc_sectbase(top, dir, idx);
@@ -830,7 +830,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
for (m = 0; m < EHEA_MAX_RPAGE; m++) {
pg = sectbase + ((k++) * EHEA_PAGESIZE);
- pt[m] = virt_to_abs(pg);
+ pt[m] = __pa(pg);
}
hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
0, pt_abs, EHEA_MAX_RPAGE);
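Note: the ehea hunks drop <asm/abs_addr.h> and replace virt_to_abs()/abs_to_virt() with plain __pa()/__va(); on current powerpc the "absolute" address is simply the physical address, so the driver hands physical addresses to the hypervisor calls directly. A minimal hedged sketch of the substitution, with hypothetical wrappers:

#include <asm/page.h>

/* Hypothetical wrapper around an hcall that wants a physical address. */
static unsigned long example_register_page(void *vpage)
{
	unsigned long rpage = __pa(vpage);	/* was: virt_to_abs(vpage) */

	/* ... pass rpage to an ehea_h_register_rpage()-style firmware call ... */
	return rpage;
}

/* Reverse direction, e.g. when reconstructing a section base address. */
static void *example_sect_base(unsigned long phys)
{
	return __va(phys);			/* was: abs_to_virt(phys) */
}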
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cb3356c9af8..04668b47a1d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -175,13 +175,13 @@ struct e1000_info;
/*
* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
- * WTHRESH=4, and since we want 64 bytes at a time written back, set
- * it to 5
+ * WTHRESH=4, so a setting of 5 gives the most efficient bus
+ * utilization but to avoid possible Tx stalls, set it to 1
*/
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
- (5 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 16) | /* wthresh must be +1 more than desired */\
(1 << 8) | /* hthresh */ \
0x1f) /* pthresh */
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index ed5b40985ed..d37bfd96c98 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -412,6 +412,8 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
#define E1000_REVISION_4 4
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fb659dd8db0..f444eb0b76d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* set up some performance related parameters to encourage the
* hardware to use the bus more efficiently in bursts, depends
* on the tx_int_delay to be enabled,
- * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+ * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if
@@ -6558,6 +6558,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5bd26763554..30efc9f0f47 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -410,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define IXGBE_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
-#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 18bf08c9d7a..1077cb2b38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1099,7 +1099,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
- udelay(10);
+ usleep_range(1000, 2000);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 90e41db3cb6..dbf37e4a45f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -70,6 +70,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
return 0;
case IXGBE_DEV_ID_82599_T3_LOM:
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4104ea25d81..56b20d17d0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2690,10 +2690,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_SOME);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
#endif /* CONFIG_IXGBE_PTP */
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 868af693821..fa3d552e1f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -114,6 +114,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
/* required last entry */
{0, }
};
@@ -2322,6 +2323,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
default:
break;
}
+
+#ifdef CONFIG_IXGBE_PTP
+ if (adapter->hw.mac.type == ixgbe_mac_X540)
+ mask |= IXGBE_EIMS_TIMESYNC;
+#endif
+
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
mask |= IXGBE_EIMS_FLOW_DIR;
@@ -2385,8 +2392,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
+
#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_check_pps_event(adapter, eicr);
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
+ ixgbe_ptp_check_pps_event(adapter, eicr);
#endif
/* re-enable the original interrupt state, no lsc, no queues */
@@ -2580,7 +2589,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_check_pps_event(adapter, eicr);
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
+ ixgbe_ptp_check_pps_event(adapter, eicr);
#endif
/* would disable interrupts here but EIAM disabled it */
@@ -7045,6 +7055,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
is_wol_supported = 1;
break;
case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
/* check eeprom to see if enabled wol */
if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 39881cb17a4..d9291316ee9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -106,6 +106,83 @@ static struct sock_filter ptp_filter[] = {
};
/**
+ * ixgbe_ptp_setup_sdp
+ * @hw: the hardware private structure
+ *
+ * this function enables or disables the clock out feature on SDP0 for
+ * the X540 device. It will create a 1second periodic output that can
+ * be used as the PPS (via an interrupt).
+ *
+ * It calculates when the systime will be on an exact second, and then
+ * aligns the start of the PPS signal to that value. The shift is
+ * necessary because it can change based on the link speed.
+ */
+static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int shift = adapter->cc.shift;
+ u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
+ u64 ns = 0, clock_edge = 0;
+
+ if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) &&
+ (hw->mac.type == ixgbe_mac_X540)) {
+
+ /* disable the pin first */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+ IXGBE_WRITE_FLUSH(hw);
+
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /*
+ * enable the SDP0 pin as output, and connected to the
+ * native function for Timesync (ClockOut)
+ */
+ esdp |= (IXGBE_ESDP_SDP0_DIR |
+ IXGBE_ESDP_SDP0_NATIVE);
+
+ /*
+ * enable the Clock Out feature on SDP0, and allow
+ * interrupts to occur when the pin changes
+ */
+ tsauxc = (IXGBE_TSAUXC_EN_CLK |
+ IXGBE_TSAUXC_SYNCLK |
+ IXGBE_TSAUXC_SDP0_INT);
+
+ /* clock period (or pulse length) */
+ clktiml = (u32)(NSECS_PER_SEC << shift);
+ clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+
+ /*
+ * Account for the cyclecounter wrap-around value by
+ * using the converted ns value of the current time to
+ * check for when the next aligned second would occur.
+ */
+ clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+ ns = timecounter_cyc2time(&adapter->tc, clock_edge);
+
+ div_u64_rem(ns, NSECS_PER_SEC, &rem);
+ clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);
+
+ /* specify the initial clock start time */
+ trgttiml = (u32)clock_edge;
+ trgttimh = (u32)(clock_edge >> 32);
+
+ IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
+ IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
+
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
*
@@ -198,6 +275,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
now);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ ixgbe_ptp_setup_sdp(adapter);
+
return 0;
}
@@ -251,6 +331,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
timecounter_init(&adapter->tc, &adapter->cc, ns);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ ixgbe_ptp_setup_sdp(adapter);
return 0;
}
@@ -281,8 +362,9 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
else
- adapter->flags2 &=
- ~IXGBE_FLAG2_PTP_PPS_ENABLED;
+ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
+
+ ixgbe_ptp_setup_sdp(adapter);
return 0;
default:
break;
@@ -305,109 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
struct ptp_clock_event event;
- event.type = PTP_CLOCK_PPS;
-
- /* Make sure ptp clock is valid, and PPS event enabled */
- if (!adapter->ptp_clock ||
- !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
- return;
-
- if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ptp_clock_event(adapter->ptp_clock, &event);
- break;
- default:
- break;
- }
- }
-}
-
-/**
- * ixgbe_ptp_enable_sdp
- * @hw: the hardware private structure
- * @shift: the clock shift for calculating nanoseconds
- *
- * this function enables the clock out feature on the sdp0 for the
- * X540 device. It will create a 1second periodic output that can be
- * used as the PPS (via an interrupt).
- *
- * It calculates when the systime will be on an exact second, and then
- * aligns the start of the PPS signal to that value. The shift is
- * necessary because it can change based on the link speed.
- */
-static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
-{
- u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh;
- u64 clock_edge = 0;
- u32 rem;
-
switch (hw->mac.type) {
case ixgbe_mac_X540:
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
- /*
- * enable the SDP0 pin as output, and connected to the native
- * function for Timesync (ClockOut)
- */
- esdp |= (IXGBE_ESDP_SDP0_DIR |
- IXGBE_ESDP_SDP0_NATIVE);
-
- /*
- * enable the Clock Out feature on SDP0, and allow interrupts
- * to occur when the pin changes
- */
- tsauxc = (IXGBE_TSAUXC_EN_CLK |
- IXGBE_TSAUXC_SYNCLK |
- IXGBE_TSAUXC_SDP0_INT);
-
- /* clock period (or pulse length) */
- clktiml = (u32)(NSECS_PER_SEC << shift);
- clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
-
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
-
- /*
- * account for the fact that we can't do u64 division
- * with remainder, by converting the clock values into
- * nanoseconds first
- */
- clock_edge >>= shift;
- div_u64_rem(clock_edge, NSECS_PER_SEC, &rem);
- clock_edge += (NSECS_PER_SEC - rem);
- clock_edge <<= shift;
-
- /* specify the initial clock start time */
- trgttiml = (u32)clock_edge;
- trgttimh = (u32)(clock_edge >> 32);
-
- IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
- IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
- IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
- IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
-
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
-
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC);
+ ptp_clock_event(adapter->ptp_clock, &event);
break;
default:
break;
}
}
-/**
- * ixgbe_ptp_disable_sdp
- * @hw: the private hardware structure
- *
- * this function disables the auxiliary SDP clock out feature
- */
-static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw)
-{
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0);
-}
/**
* ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
@@ -822,9 +810,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
if (adapter->cycle_speed == cycle_speed && timinca)
return;
- /* disable the SDP clock out */
- ixgbe_ptp_disable_sdp(hw);
-
/**
* Scale the NIC cycle counter by a large factor so that
* relatively small corrections to the frequency can be added
@@ -877,10 +862,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
- /* now that the shift has been calculated and the systime
- * registers reset, (re-)enable the Clock out feature*/
- ixgbe_ptp_enable_sdp(hw, shift);
-
/* store the new cycle speed */
adapter->cycle_speed = cycle_speed;
@@ -901,6 +882,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
ktime_to_ns(ktime_get_real()));
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ /*
+ * Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ ixgbe_ptp_setup_sdp(adapter);
}
/**
@@ -979,10 +966,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
*/
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
- ixgbe_ptp_disable_sdp(&adapter->hw);
-
/* stop the overflow check task */
- adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
+ adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+ IXGBE_FLAG2_PTP_PPS_ENABLED);
+
+ ixgbe_ptp_setup_sdp(adapter);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
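Note: the reworked ixgbe_ptp_setup_sdp() computes the first PPS target edge by converting the raw SYSTIME cycle count to nanoseconds with the timecounter, finding the distance to the next whole second with div_u64_rem(), and shifting that remainder back into cycle units. A hedged sketch of just that arithmetic (hypothetical helper name; NSECS_PER_SEC as in the driver):

#include <linux/math64.h>
#include <linux/clocksource.h>	/* struct timecounter, timecounter_cyc2time() */

#define NSECS_PER_SEC 1000000000ULL

/* Given the current raw counter value, return the counter value of the
 * next exact 1-second boundary, mirroring the setup_sdp calculation. */
static u64 example_next_second_edge(struct timecounter *tc,
				    u64 clock_now, int shift)
{
	u64 ns = timecounter_cyc2time(tc, clock_now);
	u32 rem;

	div_u64_rem(ns, NSECS_PER_SEC, &rem);

	/* remainder is in ns; scale it back to counter units via shift */
	return clock_now + ((NSECS_PER_SEC - (u64)rem) << shift);
}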
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 400f86a3117..0722f336809 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -65,6 +65,7 @@
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_X540T1 0x1560
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 383b4e1cd17..4a9c9c28568 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -175,7 +175,7 @@ struct ixgbevf_q_vector {
#define IXGBEVF_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
-#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ee9bd4819f..de1ad506665 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1747,6 +1747,7 @@ err_tx_ring_allocation:
**/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
int err = 0;
int vector, v_budget;
@@ -1775,6 +1776,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto out;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+
out:
return err;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index c911d883c27..f8064df10cc 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -2973,6 +2974,9 @@ jme_init_one(struct pci_dev *pdev,
/*
* set up PCI device basics
*/
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
rc = pci_enable_device(pdev);
if (rc) {
pr_err("Cannot enable PCI device\n");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0669f..84c13263c51 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
u8 work_rx_refill;
int skb_size;
- struct sk_buff_head rx_recycle;
/*
* RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct rx_desc *rx_desc;
int size;
- skb = __skb_dequeue(&mp->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
desc->byte_cnt, DMA_TO_DEVICE);
}
- if (skb != NULL) {
- if (skb_queue_len(&mp->rx_recycle) <
- mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size))
- __skb_queue_head(&mp->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
- }
+ dev_kfree_skb(skb);
}
__netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
napi_enable(&mp->napi);
- skb_queue_head_init(&mp->rx_recycle);
-
mp->int_mask = INT_EXT;
for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
mib_counters_update(mp);
del_timer_sync(&mp->mib_counters_timer);
- skb_queue_purge(&mp->rx_recycle);
-
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
for (i = 0; i < mp->txq_count; i++)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5a30bf82309..9b9c2ac5c4c 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3189,7 +3189,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
if (work_done < to_do) {
unsigned long flags;
- napi_gro_flush(napi);
+ napi_gro_flush(napi, false);
spin_lock_irqsave(&hw->hw_lock, flags);
__napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
@@ -3945,8 +3945,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
skge_board_name(hw), hw->chip_rev);
dev = skge_devinit(hw, 0, using_dac);
- if (!dev)
+ if (!dev) {
+ err = -ENOMEM;
goto err_out_led_off;
+ }
/* Some motherboards are broken and has zero in ROM. */
if (!is_valid_ether_addr(dev->dev_addr))
@@ -4153,6 +4155,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
DMI_MATCH(DMI_BOARD_NAME, "nForce"),
},
},
+ {
+ .ident = "ASUS P5NSLI",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 2b0748dba8b..78946feab4a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
if (~reg == 0) {
dev_err(&pdev->dev, "PCI configuration read error\n");
+ err = -EIO;
goto err_out;
}
@@ -4993,8 +4994,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
&hw->st_dma);
- if (!hw->st_le)
+ if (!hw->st_le) {
+ err = -ENOMEM;
goto err_out_reset;
+ }
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index ba6506ff4ab..926c911c0ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3094,6 +3094,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
if (validate_eth_header_mac(slave, rule_header, rlist))
return -EINVAL;
break;
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ break;
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 5b61d12f8b9..dbaaa99a0d4 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -947,8 +947,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
i = register_netdev(dev);
if (i)
goto err_register_netdev;
-
- if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround))
+ i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
+ if (i)
goto err_create_file;
if (netif_msg_drv(np)) {
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e01c0a07a93..7dfe88398d7 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -205,6 +205,7 @@ static int __init sonic_probe1(struct net_device *dev)
if (lp->descriptors == NULL) {
printk(KERN_ERR "%s: couldn't alloc DMA memory for "
" descriptors.\n", dev_name(lp->device));
+ err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index a688a2ddcfd..f97719c4851 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -3,13 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2009 Cavium Networks
+ * Copyright (C) 2009-2012 Cavium, Inc
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
+#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
@@ -33,8 +34,7 @@
#define OCTEON_MGMT_NAPI_WEIGHT 16
-/*
- * Ring sizes that are powers of two allow for more efficient modulo
+/* Ring sizes that are powers of two allow for more efficient modulo
* opertions.
*/
#define OCTEON_MGMT_RX_RING_SIZE 512
@@ -93,6 +93,7 @@ union mgmt_port_ring_entry {
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8
+#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
@@ -110,8 +111,10 @@ struct octeon_mgmt {
struct net_device *netdev;
u64 mix;
u64 agl;
+ u64 agl_prt_ctl;
int port;
int irq;
+ bool has_rx_tstamp;
u64 *tx_ring;
dma_addr_t tx_ring_handle;
unsigned int tx_next;
@@ -131,6 +134,7 @@ struct octeon_mgmt {
spinlock_t lock;
unsigned int last_duplex;
unsigned int last_link;
+ unsigned int last_speed;
struct device *dev;
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
@@ -140,6 +144,8 @@ struct octeon_mgmt {
resource_size_t mix_size;
resource_size_t agl_phys;
resource_size_t agl_size;
+ resource_size_t agl_prt_ctl_phys;
+ resource_size_t agl_prt_ctl_size;
};
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
@@ -166,22 +172,22 @@ static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
spin_unlock_irqrestore(&p->lock, flags);
}
-static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_rx_irq(p, 1);
}
-static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_rx_irq(p, 0);
}
-static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_tx_irq(p, 1);
}
-static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
octeon_mgmt_set_tx_irq(p, 0);
}
@@ -233,6 +239,28 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
}
}
+static ktime_t ptp_to_ktime(u64 ptptime)
+{
+ ktime_t ktimebase;
+ u64 ptpbase;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Fill the icache with the code */
+ ktime_get_real();
+ /* Flush all pending operations */
+ mb();
+ /* Read the time and PTP clock as close together as
+	 * possible. It is important that this sequence always take
+	 * the same amount of time, to reduce jitter.
+ */
+ ktimebase = ktime_get_real();
+ ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
+ local_irq_restore(flags);
+
+ return ktime_sub_ns(ktimebase, ptpbase - ptptime);
+}
+
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
union cvmx_mixx_orcnt mix_orcnt;
@@ -272,6 +300,20 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
+
+ /* Read the hardware TX timestamp if one was recorded */
+ if (unlikely(re.s.tstamp)) {
+ struct skb_shared_hwtstamps ts;
+ /* Read the timestamp */
+ u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+ /* Remove the timestamp from the FIFO */
+ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
+ /* Tell the kernel about the timestamp */
+ ts.syststamp = ptp_to_ktime(ns);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ts);
+ }
+
dev_kfree_skb_any(skb);
cleaned++;
@@ -372,14 +414,23 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
/* A good packet, send it up. */
skb_put(skb, re.s.len);
good:
+ /* Process the RX timestamp if it was recorded */
+ if (p->has_rx_tstamp) {
+ /* The first 8 bytes are the timestamp */
+ u64 ns = *(u64 *)skb->data;
+ struct skb_shared_hwtstamps *ts;
+ ts = skb_hwtstamps(skb);
+ ts->hwtstamp = ns_to_ktime(ns);
+ ts->syststamp = ptp_to_ktime(ns);
+ __skb_pull(skb, 8);
+ }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
- /*
- * Packet split across skbs. This can happen if we
+ /* Packet split across skbs. This can happen if we
* increase the MTU. Buffers that are already in the
* rx ring can then end up being too small. As the rx
* ring is refilled, buffers sized for the new MTU
@@ -409,8 +460,7 @@ good:
} else {
/* Some other error, discard it. */
dev_kfree_skb_any(skb);
- /*
- * Error statistics are accumulated in
+ /* Error statistics are accumulated in
* octeon_mgmt_update_rx_stats.
*/
}
@@ -488,7 +538,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
mix_ctl.s.reset = 1;
cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
cvmx_read_csr(p->mix + MIX_CTL);
- cvmx_wait(64);
+ octeon_io_clk_delay(64);
mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
if (mix_bist.u64)
@@ -537,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
cam_mode = 0;
available_cam_entries = 8;
} else {
- /*
- * One CAM entry for the primary address, leaves seven
+ /* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
available_cam_entries = 7 - netdev->uc.count;
@@ -595,12 +644,10 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
- struct sockaddr *sa = addr;
+ int r = eth_mac_addr(netdev, addr);
- if (!is_valid_ether_addr(sa->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ if (r)
+ return r;
octeon_mgmt_set_rx_filtering(netdev);
@@ -612,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
- /*
- * Limit the MTU to make sure the ethernet packets are between
+ /* Limit the MTU to make sure the ethernet packets are between
* 64 bytes and 16383 bytes.
*/
if (size_without_fcs < 64 || size_without_fcs > 16383) {
@@ -656,53 +702,258 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
return IRQ_HANDLED;
}
+static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ union cvmx_mio_ptp_clock_cfg ptp;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ bool have_hw_timestamps = false;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+	/* Check the hardware for timestamp support */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* Get the current state of the PTP clock */
+ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+ if (!ptp.s.ext_clk_en) {
+ /* The clock has not been configured to use an
+ * external source. Program it to use the main clock
+ * reference.
+ */
+ u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
+ if (!ptp.s.ptp_en)
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
+ pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
+ (NSEC_PER_SEC << 32) / clock_comp);
+ } else {
+ /* The clock is already programmed to use a GPIO */
+ u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
+ pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
+ ptp.s.ext_clk_in,
+ (NSEC_PER_SEC << 32) / clock_comp);
+ }
+
+ /* Enable the clock if it wasn't done already */
+ if (!ptp.s.ptp_en) {
+ ptp.s.ptp_en = 1;
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
+ }
+ have_hw_timestamps = true;
+ }
+
+ if (!have_hw_timestamps)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ p->has_rx_tstamp = false;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ p->has_rx_tstamp = have_hw_timestamps;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (p->has_rx_tstamp) {
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ }
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int octeon_mgmt_ioctl(struct net_device *netdev,
struct ifreq *rq, int cmd)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- if (!netif_running(netdev))
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
+ default:
+ if (p->phydev)
+ return phy_mii_ioctl(p->phydev, rq, cmd);
return -EINVAL;
+ }
+}
- if (!p->phydev)
- return -EINVAL;
+static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
- return phy_mii_ioctl(p->phydev, rq, cmd);
+ /* Disable GMX before we make any changes. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.en = 0;
+ prtx_cfg.s.tx_en = 0;
+ prtx_cfg.s.rx_en = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ int i;
+ for (i = 0; i < 10; i++) {
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
+ break;
+			mdelay(1);
+ }
+ }
+}
+
+static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ /* Restore the GMX enable state only if link is set */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+}
+
+static void octeon_mgmt_update_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (!p->phydev->link)
+ prtx_cfg.s.duplex = 1;
+ else
+ prtx_cfg.s.duplex = p->phydev->duplex;
+
+ switch (p->phydev->speed) {
+ case 10:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 1;
+ }
+ break;
+ case 100:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 0;
+ }
+ break;
+ case 1000:
+ /* 1000 MBits is only supported on 6XXX chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.speed = 1;
+ prtx_cfg.s.speed_msb = 0;
+ /* Only matters for half-duplex */
+ prtx_cfg.s.slottime = 1;
+ prtx_cfg.s.burst = p->phydev->duplex;
+ }
+ break;
+ case 0: /* No link */
+ default:
+ break;
+ }
+
+ /* Write the new GMX setting with the port still disabled. */
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config is completed. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_agl_gmx_txx_clk agl_clk;
+ union cvmx_agl_prtx_ctl prtx_ctl;
+
+ prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
+ /* MII (both speeds) and RGMII 1000 speed. */
+ agl_clk.s.clk_cnt = 1;
+ if (prtx_ctl.s.mode == 0) { /* RGMII mode */
+ if (p->phydev->speed == 10)
+ agl_clk.s.clk_cnt = 50;
+ else if (p->phydev->speed == 100)
+ agl_clk.s.clk_cnt = 5;
+ }
+ cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
+ }
}
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- union cvmx_agl_gmx_prtx_cfg prtx_cfg;
unsigned long flags;
int link_changed = 0;
+ if (!p->phydev)
+ return;
+
spin_lock_irqsave(&p->lock, flags);
- if (p->phydev->link) {
- if (!p->last_link)
- link_changed = 1;
- if (p->last_duplex != p->phydev->duplex) {
- p->last_duplex = p->phydev->duplex;
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.duplex = p->phydev->duplex;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
- }
- } else {
- if (p->last_link)
- link_changed = -1;
+
+ if (!p->phydev->link && p->last_link)
+ link_changed = -1;
+
+ if (p->phydev->link
+ && (p->last_duplex != p->phydev->duplex
+ || p->last_link != p->phydev->link
+ || p->last_speed != p->phydev->speed)) {
+ octeon_mgmt_disable_link(p);
+ link_changed = 1;
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
}
+
p->last_link = p->phydev->link;
+ p->last_speed = p->phydev->speed;
+ p->last_duplex = p->phydev->duplex;
+
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
if (link_changed > 0) {
- netif_carrier_on(netdev);
pr_info("%s: Link is up - %d/%s\n", netdev->name,
p->phydev->speed,
DUPLEX_FULL == p->phydev->duplex ?
"Full" : "Half");
} else {
- netif_carrier_off(netdev);
pr_info("%s: Link is down\n", netdev->name);
}
}
@@ -723,9 +974,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
PHY_INTERFACE_MODE_MII);
if (!p->phydev)
- return -1;
-
- phy_start_aneg(p->phydev);
+ return -ENODEV;
return 0;
}
@@ -733,12 +982,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
static int octeon_mgmt_open(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union cvmx_mixx_ctl mix_ctl;
union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
union cvmx_mixx_oring1 oring1;
union cvmx_mixx_iring1 iring1;
- union cvmx_agl_gmx_prtx_cfg prtx_cfg;
union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
union cvmx_mixx_irhwm mix_irhwm;
union cvmx_mixx_orhwm mix_orhwm;
@@ -785,9 +1032,30 @@ static int octeon_mgmt_open(struct net_device *netdev)
} while (mix_ctl.s.reset);
}
- agl_gmx_inf_mode.u64 = 0;
- agl_gmx_inf_mode.s.en = 1;
- cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /* Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (p->port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
oring1.u64 = 0;
oring1.s.obase = p->tx_ring_handle >> 3;
@@ -799,18 +1067,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
- /* Disable packet I/O. */
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.en = 0;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-
memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
octeon_mgmt_set_mac_address(netdev, &sa);
octeon_mgmt_change_mtu(netdev, netdev->mtu);
- /*
- * Enable the port HW. Packets are not allowed until
+ /* Enable the port HW. Packets are not allowed until
* cvmx_mgmt_port_enable() is called.
*/
mix_ctl.u64 = 0;
@@ -819,27 +1081,70 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_ctl.s.nbtarb = 0; /* Arbitration mode */
/* MII CB-request FIFO programmable high watermark */
mix_ctl.s.mrq_hwm = 1;
+#ifdef __LITTLE_ENDIAN
+ mix_ctl.s.lendian = 1;
+#endif
cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
- if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
- || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
- /*
- * Force compensation values, as they are not
- * determined properly by HW
- */
- union cvmx_agl_gmx_drv_ctl drv_ctl;
+ /* Read the PHY to find the mode of the interface. */
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
+ goto err_noirq;
+ }
- drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
- if (port) {
- drv_ctl.s.byp_en1 = 1;
- drv_ctl.s.nctl1 = 6;
- drv_ctl.s.pctl1 = 6;
- } else {
- drv_ctl.s.byp_en = 1;
- drv_ctl.s.nctl = 6;
- drv_ctl.s.pctl = 6;
+ /* Set the mode of the interface, RGMII/MII. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+ int rgmii_mode = (p->phydev->supported &
+ (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* MII clock counts are based on the 125 MHz
+		 * reference, which has an 8 ns period, so our delays
+		 * need to be multiplied by this factor.
+ */
+#define NS_PER_PHY_CLK 8
+
+ /* Take the DLL and clock tree out of reset */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.clkrst = 0;
+ if (rgmii_mode) {
+ agl_prtx_ctl.s.dllrst = 0;
+ agl_prtx_ctl.s.clktx_byp = 0;
}
- cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
+
+ /* Wait for the DLL to lock. External 125 MHz
+ * reference clock must be stable at this point.
+ */
+ ndelay(256 * NS_PER_PHY_CLK);
+
+ /* Enable the interface */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.enable = 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+ /* Read the value back to force the previous write */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.s.comp = 1;
+ agl_prtx_ctl.s.drv_byp = 0;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ /* Force write out before wait. */
+ cvmx_read_csr(p->agl_prt_ctl);
+
+		/* Wait for the compensation state to lock. */
+ ndelay(1040 * NS_PER_PHY_CLK);
+
+		/* Some Ethernet switches cannot handle the standard
+		 * interframe gap; increase it to 16 bytes.
+ */
+ cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
}
octeon_mgmt_rx_fill_ring(netdev);
@@ -870,7 +1175,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 1;
+ mix_orhwm.s.orhwm = 0;
cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -879,13 +1184,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_intena.s.othena = 1;
cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
-
/* Enable packet I/O. */
rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
rxx_frm_ctl.s.pre_align = 1;
- /*
- * When set, disables the length check for non-min sized pkts
+ /* When set, disables the length check for non-min sized pkts
* with padding in the client data.
*/
rxx_frm_ctl.s.pad_len = 1;
@@ -903,33 +1207,26 @@ static int octeon_mgmt_open(struct net_device *netdev)
rxx_frm_ctl.s.ctl_drp = 1;
/* Strip off the preamble */
rxx_frm_ctl.s.pre_strp = 1;
- /*
- * This port is configured to send PREAMBLE+SFD to begin every
+ /* This port is configured to send PREAMBLE+SFD to begin every
* frame. GMX checks that the PREAMBLE is sent correctly.
*/
rxx_frm_ctl.s.pre_chk = 1;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
- /* Enable the AGL block */
- agl_gmx_inf_mode.u64 = 0;
- agl_gmx_inf_mode.s.en = 1;
- cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
-
- /* Configure the port duplex and enables */
- prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- prtx_cfg.s.tx_en = 1;
- prtx_cfg.s.rx_en = 1;
- prtx_cfg.s.en = 1;
- p->last_duplex = 1;
- prtx_cfg.s.duplex = p->last_duplex;
- cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+ /* Configure the port duplex, speed and enables */
+ octeon_mgmt_disable_link(p);
+ if (p->phydev)
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
p->last_link = 0;
- netif_carrier_off(netdev);
-
- if (octeon_mgmt_init_phy(netdev)) {
- dev_err(p->dev, "Cannot initialize PHY.\n");
- goto err_noirq;
+ p->last_speed = 0;
+	/* The PHY is not present in the simulator; the carrier was
+	 * enabled while initializing the phy there, so leave it enabled.
+ */
+ if (p->phydev) {
+ netif_carrier_off(netdev);
+ phy_start_aneg(p->phydev);
}
netif_wake_queue(netdev);
@@ -959,6 +1256,7 @@ static int octeon_mgmt_stop(struct net_device *netdev)
if (p->phydev)
phy_disconnect(p->phydev);
+ p->phydev = NULL;
netif_carrier_off(netdev);
@@ -991,6 +1289,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int rv = NETDEV_TX_BUSY;
re.d64 = 0;
+ re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
re.s.len = skb->len;
re.s.addr = dma_map_single(p->dev, skb->data,
skb->len,
@@ -1031,6 +1330,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(p->mix + MIX_ORING2, 1);
+ netdev->trans_start = jiffies;
rv = NETDEV_TX_OK;
out:
octeon_mgmt_update_tx_stats(netdev);
@@ -1068,7 +1368,7 @@ static int octeon_mgmt_get_settings(struct net_device *netdev,
if (p->phydev)
return phy_ethtool_gset(p->phydev, cmd);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
static int octeon_mgmt_set_settings(struct net_device *netdev,
@@ -1082,23 +1382,37 @@ static int octeon_mgmt_set_settings(struct net_device *netdev,
if (p->phydev)
return phy_ethtool_sset(p->phydev, cmd);
- return -EINVAL;
+ return -EOPNOTSUPP;
+}
+
+static int octeon_mgmt_nway_reset(struct net_device *dev)
+{
+ struct octeon_mgmt *p = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (p->phydev)
+ return phy_start_aneg(p->phydev);
+
+ return -EOPNOTSUPP;
}
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
.get_drvinfo = octeon_mgmt_get_drvinfo,
- .get_link = ethtool_op_get_link,
.get_settings = octeon_mgmt_get_settings,
- .set_settings = octeon_mgmt_set_settings
+ .set_settings = octeon_mgmt_set_settings,
+ .nway_reset = octeon_mgmt_nway_reset,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops octeon_mgmt_ops = {
.ndo_open = octeon_mgmt_open,
.ndo_stop = octeon_mgmt_stop,
.ndo_start_xmit = octeon_mgmt_xmit,
- .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_do_ioctl = octeon_mgmt_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
@@ -1113,6 +1427,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
const u8 *mac;
struct resource *res_mix;
struct resource *res_agl;
+ struct resource *res_agl_prt_ctl;
int len;
int result;
@@ -1120,6 +1435,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
if (netdev == NULL)
return -ENOMEM;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
dev_set_drvdata(&pdev->dev, netdev);
p = netdev_priv(netdev);
netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
@@ -1127,6 +1444,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
p->netdev = netdev;
p->dev = &pdev->dev;
+ p->has_rx_tstamp = false;
data = of_get_property(pdev->dev.of_node, "cell-index", &len);
if (data && len == sizeof(*data)) {
@@ -1159,10 +1477,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
goto err;
}
+ res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (res_agl_prt_ctl == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
p->mix_phys = res_mix->start;
p->mix_size = resource_size(res_mix);
p->agl_phys = res_agl->start;
p->agl_size = resource_size(res_agl);
+ p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
+ p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
@@ -1181,10 +1508,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
goto err;
}
+ if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
+ result = -ENXIO;
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_agl_prt_ctl->name);
+ goto err;
+ }
p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
-
+ p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size);
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
@@ -1199,14 +1534,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
- memcpy(netdev->dev_addr, mac, 6);
+ if (mac && is_valid_ether_addr(mac)) {
+ memcpy(netdev->dev_addr, mac, ETH_ALEN);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+ } else {
+ eth_hw_addr_random(netdev);
+ }
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ netif_carrier_off(netdev);
result = register_netdev(netdev);
if (result)
goto err;
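
The octeon_mgmt_ioctl_hwtstamp() handler added above follows the standard SIOCSHWTSTAMP contract: userspace passes a struct hwtstamp_config through ifreq.ifr_data, the driver validates tx_type and rx_filter, may widen the filter (here to HWTSTAMP_FILTER_ALL), and copies the result back. A minimal userspace sketch of issuing such a request is shown below; the interface name "mgmt0" is a placeholder, error handling is reduced to perror(), and root (CAP_NET_ADMIN) is normally required.

/*
 * Sketch of a userspace SIOCSHWTSTAMP request against the handler
 * added in this patch. Interface name is a placeholder.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;             /* timestamp transmitted frames */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* The driver may widen the filter and copies the result back. */
	printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
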
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index bce01641ee6..5296cc8d3cb 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -26,7 +26,10 @@ if PCH_GBE
config PCH_PTP
bool "PCH PTP clock support"
default n
- depends on PTP_1588_CLOCK_PCH
+ depends on EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
+ select PTP_1588_CLOCK_PCH
---help---
Say Y here if you want to use Precision Time Protocol (PTP) in the
driver. PTP is a method to precisely synchronize distributed clocks
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index b528e52a8ee..2a0c9dc48eb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -38,7 +38,7 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-static const struct crb_128M_2M_block_map
+static struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
{{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 473ce134ca6..24ad17ec7fc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1601,7 +1601,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
- if (qlcnic_alloc_adapter_resources(adapter))
+ err = qlcnic_alloc_adapter_resources(adapter);
+ if (err)
goto err_out_free_netdev;
adapter->dev_rst_time = jiffies;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 995d0cfc4c0..1c818254b7b 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,7 +563,7 @@ rx_next:
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
- napi_gro_flush(napi);
+ napi_gro_flush(napi, false);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1d83565cc6a..3ed7add23c1 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -228,7 +228,7 @@ typedef enum {
static const struct {
const char *name;
u32 hw_flags;
-} board_info[] __devinitdata = {
+} board_info[] __devinitconst = {
{ "RealTek RTL8139", RTL8139_CAPS },
{ "RealTek RTL8129", RTL8129_CAPS },
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bad8f2eec9b..c8bfea0524d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2438,6 +2438,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!rtsu) {
dev_err(&pdev->dev, "Not found TSU resource\n");
+ ret = -ENODEV;
goto out_release;
}
mdp->tsu_addr = ioremap(rtsu->start,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 96bd980e828..4f86d0cd516 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2019,14 +2019,14 @@ static void efx_set_rx_mode(struct net_device *net_dev)
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
- set_bit_le(bit, mc_hash->byte);
+ __set_bit_le(bit, mc_hash);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
- set_bit_le(0xff, mc_hash->byte);
+ __set_bit_le(0xff, mc_hash);
}
if (efx->port_enabled)
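
The hunk above swaps the driver's open-coded little-endian bit helper for the generic __set_bit_le(); the addressing is the same either way: bit nr lives in byte nr / 8 at position nr % 8, and the multicast hash bit is the low bits of ether_crc_le() of the address. A standalone sketch of that indexing follows; the 256-entry hash size is illustrative (standing in for EFX_MCAST_HASH_ENTRIES), and the CRC value is the broadcast-address constant quoted in the diff.

/*
 * Sketch of little-endian bit addressing as used for the sfc
 * multicast hash. Hash size is illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MCAST_HASH_ENTRIES 256

static void set_bit_le(unsigned int nr, uint8_t *addr)
{
	addr[nr / 8] |= 1u << (nr % 8);
}

int main(void)
{
	uint8_t mc_hash[MCAST_HASH_ENTRIES / 8];
	unsigned int crc = 0xbe2612ff;   /* ether_crc_le() of ff:ff:ff:ff:ff:ff */
	unsigned int bit = crc & (MCAST_HASH_ENTRIES - 1);

	memset(mc_hash, 0, sizeof(mc_hash));
	set_bit_le(bit, mc_hash);        /* broadcast always hashes to bit 0xff */

	printf("bit %u -> byte %u, mask 0x%02x\n",
	       bit, bit / 8, (unsigned int)mc_hash[bit / 8]);
	return 0;
}
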
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c1a010cda89..576a3109116 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1101,18 +1101,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
-/* Set bit in a little-endian bitfield */
-static inline void set_bit_le(unsigned nr, unsigned char *addr)
-{
- addr[nr / 8] |= (1 << (nr % 8));
-}
-
-/* Clear bit in a little-endian bitfield */
-static inline void clear_bit_le(unsigned nr, unsigned char *addr)
-{
- addr[nr / 8] &= ~(1 << (nr % 8));
-}
-
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index cdff40b6572..aab7cacb2e3 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -472,9 +472,9 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- clear_bit_le(tx_queue->queue, (void *)&reg);
+ __clear_bit_le(tx_queue->queue, &reg);
else
- set_bit_le(tx_queue->queue, (void *)&reg);
+ __set_bit_le(tx_queue->queue, &reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 5b3dd028ce8..0767043f44a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -640,8 +640,7 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
evt = list_entry(cursor, struct efx_ptp_event_rx,
link);
if (time_after(jiffies, evt->expiry)) {
- list_del(&evt->link);
- list_add(&evt->link, &ptp->evt_free_list);
+ list_move(&evt->link, &ptp->evt_free_list);
netif_warn(efx, hw, efx->net_dev,
"PTP rx event dropped\n");
}
@@ -684,8 +683,7 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
match->state = PTP_PACKET_STATE_MATCHED;
rc = PTP_PACKET_STATE_MATCHED;
- list_del(&evt->link);
- list_add(&evt->link, &ptp->evt_free_list);
+ list_move(&evt->link, &ptp->evt_free_list);
break;
}
}
@@ -820,8 +818,7 @@ static int efx_ptp_stop(struct efx_nic *efx)
/* Drop any pending receive events */
spin_lock_bh(&efx->ptp_data->evt_lock);
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
- list_del(cursor);
- list_add(cursor, &efx->ptp_data->evt_free_list);
+ list_move(cursor, &efx->ptp_data->evt_free_list);
}
spin_unlock_bh(&efx->ptp_data->evt_lock);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 4613591b43e..d8166012b7d 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
struct net_device *dev)
{
- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
struct sis190_private *tp = netdev_priv(dev);
struct pci_dev *isa_bridge;
u8 reg, tmp8;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 203d9c6ec23..fb9f6b38511 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -478,8 +478,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
/* IO region. */
ioaddr = pci_iomap(pci_dev, 0, 0);
- if (!ioaddr)
+ if (!ioaddr) {
+ ret = -ENOMEM;
goto err_out_cleardev;
+ }
sis_priv = netdev_priv(net_dev);
sis_priv->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1da313..7d51a65ab09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
unsigned int dirty_rx;
struct sk_buff **rx_skbuff;
dma_addr_t *rx_skbuff_dma;
- struct sk_buff_head rx_recycle;
struct net_device *dev;
dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be88331d17..c6cdbc4eb05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
- /*
- * If there's room in the queue (limit it to size)
- * we add this skb back into the pool,
- * if it's the right size.
- */
- if ((skb_queue_len(&priv->rx_recycle) <
- priv->dma_rx_size) &&
- skb_recycle_check(skb, priv->dma_buf_sz))
- __skb_queue_head(&priv->rx_recycle, skb);
- else
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
priv->tx_skbuff[entry] = NULL;
}
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
priv->eee_enabled = stmmac_eee_init(priv);
napi_enable(&priv->napi);
- skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
kfree(priv->tm);
#endif
napi_disable(&priv->napi);
- skb_queue_purge(&priv->rx_recycle);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (likely(priv->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb_ip_align(priv->dev,
- bfsize);
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
if (unlikely(skb == NULL))
break;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8419bf385e0..275b430aeb7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9788,6 +9788,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
if (!pci_is_pcie(pdev)) {
dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
+ err = -ENODEV;
goto err_out_free_res;
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9ae12d0c963..6c8695ec7cb 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2963,7 +2963,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
- if (gem_get_device_address(gp))
+ err = gem_get_device_address(gp);
+ if (err)
goto err_out_free_consistent;
dev->netdev_ops = &gem_netdev_ops;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d15c888e9df..49956730cd8 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -863,6 +863,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
next_dma = desc_read(desc, hw_next);
chan->head = desc_from_phys(pool, next_dma);
+ chan->count--;
chan->stats.teardown_dequeue++;
/* issue callback without locks held */