Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c         |  73
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c                |  84
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h                |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c                 |  19
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          |  58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      | 368
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h      |  47
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  | 377
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h  |  91
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h      | 252
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c     | 240
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h     |  16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     | 349
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h      |   6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c       |  79
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h       |  21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c    | 351
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h    |  27
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c    |  77
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c     | 126
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h     |   9
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c                 |   4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h              |   3
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c           |   5
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                  | 912
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h                  |  30
27 files changed, 2669 insertions(+), 958 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e81..0b3e23ec37f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
- unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = resource_size(res_mem);
- if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->base = ioremap(res_mem->start, iomem_size);
+ priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
if (priv->base == NULL) {
ret = -ENOMEM;
- goto out_release_mem;
+ goto out;
}
+
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ clk_prepare_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
- clk_enable(priv->phy_clk);
+ clk_prepare_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
* if a slave is not present on hw */
bus->phy_mask = ~(1 << priv->phy_id);
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
return 0;
out_unregister_mdio:
- if (priv->mii_bus) {
+ if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
- }
out_free_mdio:
if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
bcm_enet_mdio_write_mii);
}
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
/* disable hw block clocks */
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
- unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- iomem_size = resource_size(res);
- if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
- return -EBUSY;
-
- bcm_enet_shared_base = ioremap(res->start, iomem_size);
- if (!bcm_enet_shared_base) {
- release_mem_region(res->start, iomem_size);
+ bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bcm_enet_shared_base)
return -ENOMEM;
- }
+
return 0;
}
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
- struct resource *res;
-
- iounmap(bcm_enet_shared_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
return 0;
}
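
Aside: the probe/remove conversion above leans on managed (devm) resources. devm_request_and_ioremap() reserves the MMIO region and maps it in one call, and the driver core releases both automatically on detach, which is why the explicit iounmap()/release_mem_region() error paths disappear. A minimal sketch of the pattern, with illustrative names:

#include <linux/platform_device.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* request_mem_region() + ioremap(), both devm-managed;
	 * nothing to undo in remove() or on later probe errors */
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENOMEM;

	return 0;
}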
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87..eec0af45b85 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
/* Alloc skb */
slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb) {
- bgmac_err(bgmac, "Allocation of skb failed!\n");
+ if (!slot->skb)
return -ENOMEM;
- }
/* Poison - if everything goes fine, hardware will overwrite it */
rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
};
/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus;
+ int i, err = 0;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "bgmac mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+ bgmac->core->core_unit);
+ mii_bus->priv = bgmac;
+ mii_bus->read = bgmac_mii_read;
+ mii_bus->write = bgmac_mii_write;
+ mii_bus->parent = &bgmac->core->dev;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+ mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (!mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ bgmac_err(bgmac, "Registration of mii bus failed\n");
+ goto err_free_irq;
+ }
+
+ bgmac->mii_bus = mii_bus;
+
+ return err;
+
+err_free_irq:
+ kfree(mii_bus->irq);
+err_free_bus:
+ mdiobus_free(mii_bus);
+ return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus = bgmac->mii_bus;
+
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
+/**************************************************
* BCMA bus ops
**************************************************/
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ err = bgmac_mii_register(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register MDIO\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
err = -ENOTSUPP;
- goto err_dma_free;
+ goto err_mii_unregister;
}
netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
return 0;
+err_mii_unregister:
+ bgmac_mii_unregister(bgmac);
err_dma_free:
bgmac_dma_free(bgmac);
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
+ bgmac_mii_unregister(bgmac);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
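
Once bgmac_mii_register() has added the bus, a PHY would normally be attached through phylib. A hedged usage sketch (assuming the flags-less phy_connect() signature of this kernel generation; the helper and callback names are illustrative):

#include <linux/phy.h>

static void example_adjust_link(struct net_device *net_dev);

static int example_connect_phy(struct bgmac *bgmac)
{
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phy_dev;

	/* phylib addresses a PHY as "<bus id>:<address>" */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT,
		 bgmac->mii_bus->id, bgmac->phyaddr);

	phy_dev = phy_connect(bgmac->net_dev, bus_id,
			      example_adjust_link,
			      PHY_INTERFACE_MODE_MII);

	return IS_ERR(phy_dev) ? PTR_ERR(phy_dev) : 0;
}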
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f..98d4b5fcc07 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
struct net_device *net_dev;
struct napi_struct napi;
+ struct mii_bus *mii_bus;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6..5d204492c60 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -416,7 +416,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ &bp->status_blk_mapping,
+ GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL)
goto alloc_mem_err;
- memset(status_blk, 0, bp->status_stats_size);
-
bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr =
@@ -3212,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
- __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
skb->protocol = eth_type_trans(skb, bp->dev);
@@ -3554,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev)
rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
- if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
+ if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
if (dev->flags & IFF_PROMISC) {
@@ -7696,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev);
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
- features |= NETIF_F_HW_VLAN_RX;
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
return features;
}
@@ -7707,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev);
/* TSO with VLAN tag won't work with current firmware */
- if (features & NETIF_F_HW_VLAN_TX)
+ if (features & NETIF_F_HW_VLAN_CTAG_TX)
dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
else
dev->vlan_features &= ~NETIF_F_ALL_TSO;
- if ((!!(features & NETIF_F_HW_VLAN_RX) !=
+ if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
!!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
netif_running(dev)) {
bnx2_netif_stop(bp, false);
@@ -8552,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->vlan_features = dev->hw_features;
- dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT;
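
The bnx2.c hunks track two tree-wide VLAN API changes: the NETIF_F_HW_VLAN_* feature flags became protocol-qualified NETIF_F_HW_VLAN_CTAG_*, and __vlan_hwaccel_put_tag() now takes the tag protocol explicitly so 802.1ad (QinQ) tags can be told apart from 802.1Q ones. A minimal sketch of the new receive-side call:

#include <linux/if_vlan.h>

static void example_rx_vlan(struct sk_buff *skb, u16 vlan_tci)
{
	/* the protocol is passed explicitly since the CTAG/STAG split */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}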
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a96508..3dba2a70a00 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.02-0"
-#define DRV_MODULE_RELDATE "2013/01/14"
+#define DRV_MODULE_VERSION "1.78.17-0"
+#define DRV_MODULE_RELDATE "2013/04/11"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
/* chip-independent shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
* START_BD - describes packet
* START_BD (split) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
* Frag BDs - describes pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -844,6 +850,9 @@ struct bnx2x_common {
#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
+#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
+ CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
CHIP_IS_57712_MF(bp) || \
CHIP_IS_57712_VF(bp))
@@ -853,9 +862,7 @@ struct bnx2x_common {
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
CHIP_IS_57810_VF(bp) || \
- CHIP_IS_57811(bp) || \
- CHIP_IS_57811_MF(bp) || \
- CHIP_IS_57811_VF(bp) || \
+ CHIP_IS_57811xx(bp) || \
CHIP_IS_57840(bp) || \
CHIP_IS_57840_MF(bp) || \
CHIP_IS_57840_VF(bp))
@@ -1215,14 +1222,16 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
struct bnx2x_prev_path_list {
+ struct list_head list;
u8 bus;
u8 slot;
u8 path;
- struct list_head list;
+ u8 aer;
u8 undi;
};
@@ -1269,6 +1278,8 @@ struct bnx2x {
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1281,6 +1292,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
@@ -1944,12 +1957,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2286,7 +2296,7 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
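
The BNX2X_ILT_ZALLOC rewrite above is the same pattern applied throughout this series: requesting __GFP_ZERO from dma_alloc_coherent() instead of following the allocation with a memset(). A sketch of the idiom, with illustrative names (dma_zalloc_coherent() is an equivalent helper):

#include <linux/dma-mapping.h>

static void *example_dma_zalloc(struct device *dev, size_t size,
				dma_addr_t *mapping)
{
	/* returns zeroed, coherent DMA memory in one call */
	return dma_alloc_coherent(dev, size, mapping,
				  GFP_KERNEL | __GFP_ZERO);
}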
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 57619dd4a92..b8fbe266ab6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* Compute number of aggregated segments, and gso_type.
*/
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- u16 len_on_bd, unsigned int pkt_len)
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
/* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
- NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- skb_shinfo(skb)->gso_size);
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- le16_to_cpu(cqe->pkt_len));
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -641,6 +642,14 @@ static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
&iph->saddr, &iph->daddr, 0);
}
+
+static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+ void (*gro_func)(struct bnx2x*, struct sk_buff*))
+{
+ skb_set_network_header(skb, 0);
+ gro_func(bp, skb);
+ tcp_gro_complete(skb);
+}
#endif
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -648,19 +657,17 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
- skb_set_network_header(skb, 0);
switch (be16_to_cpu(skb->protocol)) {
case ETH_P_IP:
- bnx2x_gro_ip_csum(bp, skb);
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
break;
case ETH_P_IPV6:
- bnx2x_gro_ipv6_csum(bp, skb);
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
be16_to_cpu(skb->protocol));
}
- tcp_gro_complete(skb);
}
#endif
napi_gro_receive(&fp->napi, skb);
@@ -718,7 +725,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
@@ -993,7 +1000,7 @@ reuse_rx:
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb,
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
napi_gro_receive(&fp->napi, skb);
@@ -1037,6 +1044,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
DP(NETIF_MSG_INTR,
"got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
+
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
@@ -1718,7 +1726,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
-static int bnx2x_setup_irqs(struct bnx2x *bp)
+int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG &&
@@ -2009,7 +2017,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
* Cleans the objects that have internal lists without sending
* ramrods. Should be run when interrupts are disabled.
*/
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2574,6 +2582,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ bnx2x_pre_irq_nic_init(bp);
+
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
@@ -2583,11 +2593,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error2);
}
- /* Setup NIC internals and enable interrupts */
- bnx2x_nic_init(bp, load_code);
-
/* Init per-function objects */
if (IS_PF(bp)) {
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_post_irq_nic_init(bp, load_code);
+
bnx2x_init_bp_objs(bp);
bnx2x_iov_nic_init(bp);
@@ -2657,7 +2667,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (IS_PF(bp))
rc = bnx2x_set_eth_mac(bp, true);
else /* vf */
- rc = bnx2x_vfpf_set_mac(bp);
+ rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+ true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
@@ -2777,7 +2788,7 @@ load_error0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
u8 rc = 0, cos, i;
@@ -2926,9 +2937,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_free_fp_mem_cnic(bp);
if (IS_PF(bp)) {
- bnx2x_free_mem(bp);
if (CNIC_LOADED(bp))
bnx2x_free_mem_cnic(bp);
+ bnx2x_free_mem(bp);
}
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
@@ -3089,11 +3100,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -3101,11 +3112,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -3134,7 +3144,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
__sum16 tsum = (__force __sum16) csum;
@@ -3149,30 +3159,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum);
}
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else if (skb_is_gso(skb)) {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
return rc;
}
@@ -3257,14 +3284,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -3275,13 +3311,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3301,6 +3337,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
+
+/**
* bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
* @bp: driver handle
@@ -3308,15 +3378,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3331,17 +3401,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3355,9 +3423,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@ -3403,6 +3471,75 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ else
+ hlen_w += sizeof(struct udphdr) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ struct iphdr *iph = ip_hdr(skb);
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((~iph->check) -
+ iph->tot_len - iph->frag_off));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+ pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+ }
+}
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3418,6 +3555,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3485,7 +3623,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3533,12 +3671,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3558,19 +3693,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
-#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp)) {
-#endif
+ if (IS_VF(bp))
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
- } else {
+ else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- }
-#endif
}
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3580,23 +3712,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+ /* add an additional parse BD indication to the start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ }
- if (IS_MF_SI(bp) || IS_VF(bp)) {
- /* fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+ /* Add the macs to the parsing BD if this is a vf */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3618,14 +3785,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3638,10 +3804,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
@@ -3731,9 +3899,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
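
The encapsulation TX path above is built on the skb_inner_*() accessors introduced for tunnel offload: when skb->encapsulation is set, they point at the inner (encapsulated) headers while the plain accessors keep pointing at the outer ones. A hedged sketch of the header-length arithmetic that bnx2x_update_pbds_gso_enc() feeds to the firmware (standalone helper, assumes a TCP inner protocol):

#include <linux/skbuff.h>
#include <net/tcp.h>

static u16 example_enc_hlen_w(const struct sk_buff *skb)
{
	/* outer IP header up to the inner transport header,
	 * in 16-bit words, as the parse BD expects */
	u16 hlen_w = (skb_inner_transport_header(skb) -
		      skb_network_header(skb)) >> 1;

	/* plus the inner TCP header itself */
	return hlen_w + (inner_tcp_hdrlen(skb) >> 1);
}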
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c..151675d66b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+} while (0)
#define BNX2X_ALLOC(x, size) \
do { \
@@ -295,16 +295,29 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_pre_irq_nic_init - init driver internals.
*
* @bp: driver handle
*
* Initializes:
- * - rings
+ * - fastpath object
+ * - fastpath rings
+ * etc.
+ */
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_post_irq_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
* - status blocks
+ * - slowpath rings
* - etc.
*/
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
/**
* bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
*
@@ -496,7 +509,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +850,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +860,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +986,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1396,4 +1415,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
*
*/
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
#endif /* BNX2X_CMN_H */
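
The NAPI hunks above drop the driver-private BNX2X_NAPI_WEIGHT (128) in favor of the core's NAPI_POLL_WEIGHT (64), the recommended default budget. A minimal sketch, with illustrative names:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget);

static void example_add_napi(struct net_device *dev,
			     struct napi_struct *napi)
{
	/* use the core default instead of a private weight */
	netif_napi_add(dev, napi, example_poll, NAPI_POLL_WEIGHT);
}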
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f..ce1a9161867 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1364,11 +1364,27 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
return rc;
}
+static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
+ int buf_size)
+{
+ int rc;
+
+ rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
+
+ if (!rc) {
+ __be32 *be = (__be32 *)buf;
+
+ while ((buf_size -= 4) >= 0)
+ *buf++ = be32_to_cpu(*be++);
+ }
+
+ return rc;
+}
+
static int bnx2x_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *eebuf)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1383,9 +1399,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
/* parameters already validated in ethtool_get_eeprom */
- rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
-
- return rc;
+ return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}
static int bnx2x_get_module_eeprom(struct net_device *dev,
@@ -1393,10 +1407,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
u8 *data)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0, phy_idx;
+ int rc = -EINVAL, phy_idx;
u8 *user_data = data;
- int remaining_len = ee->len, xfer_size;
- unsigned int page_off = ee->offset;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1418,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- bnx2x_acquire_phy_lock(bp);
- while (!rc && remaining_len > 0) {
- xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
- SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
&bp->link_params,
- page_off,
+ I2C_DEV_ADDR_A0,
+ start_addr,
xfer_size,
user_data);
- remaining_len -= xfer_size;
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
user_data += xfer_size;
- page_off += xfer_size;
+ start_addr += xfer_size;
}
- bnx2x_release_phy_lock(bp);
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
return rc;
}
@@ -1427,24 +1471,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct bnx2x *bp = netdev_priv(dev);
- int phy_idx;
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
-
phy_idx = bnx2x_get_cur_phy_idx(bp);
- switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFPP_10G_FIBER:
- case ETH_PHY_SFP_1G_FIBER:
- case ETH_PHY_DA_TWINAX:
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- return 0;
- default:
- return -EOPNOTSUPP;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
+ return 0;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -1496,9 +1566,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
int buf_size)
{
int rc;
- u32 cmd_flags;
- u32 align_offset;
- __be32 val;
+ u32 cmd_flags, align_offset, val;
+ __be32 val_be;
if (offset + buf_size > bp->common.flash_size) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1517,16 +1586,16 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
align_offset = (offset & ~0x03);
- rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
if (rc == 0) {
- val &= ~(0xff << BYTE_OFFSET(offset));
- val |= (*data_buf << BYTE_OFFSET(offset));
-
/* nvram data is returned as an array of bytes
* convert it back to cpu order
*/
- val = be32_to_cpu(val);
+ val = be32_to_cpu(val_be);
+
+ val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset));
+ val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset));
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
cmd_flags);
@@ -1820,12 +1889,15 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
bp->link_params.req_flow_ctrl[cfg_idx] =
BNX2X_FLOW_CTRL_AUTO;
}
- bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_fc_auto_adv = 0;
if (epause->rx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
if (epause->tx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
+
+ if (!bp->link_params.req_fc_auto_adv)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
}
DP(BNX2X_MSG_ETHTOOL,
@@ -2526,14 +2598,168 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
return rc;
}
+struct code_entry {
+ u32 sram_start_addr;
+ u32 code_attribute;
+#define CODE_IMAGE_TYPE_MASK 0xf0800003
+#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
+#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
+#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
+ u32 nvm_start_addr;
+};
+
+#define CODE_ENTRY_MAX 16
+#define CODE_ENTRY_EXTENDED_DIR_IDX 15
+#define MAX_IMAGES_IN_EXTENDED_DIR 64
+#define NVRAM_DIR_OFFSET 0x14
+
+#define EXTENDED_DIR_EXISTS(code) \
+ ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
+ (code & CODE_IMAGE_LENGTH_MASK) != 0)
+
#define CRC32_RESIDUAL 0xdebb20e3
+#define CRC_BUFF_SIZE 256
+
+static int bnx2x_nvram_crc(struct bnx2x *bp,
+ int offset,
+ int size,
+ u8 *buff)
+{
+ u32 crc = ~0;
+ int rc = 0, done = 0;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
+
+ while (done < size) {
+ int count = min_t(int, size - done, CRC_BUFF_SIZE);
+
+ rc = bnx2x_nvram_read(bp, offset + done, buff, count);
+
+ if (rc)
+ return rc;
+
+ crc = crc32_le(crc, buff, count);
+ done += count;
+ }
+
+ if (crc != CRC32_RESIDUAL)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static int bnx2x_test_nvram_dir(struct bnx2x *bp,
+ struct code_entry *entry,
+ u8 *buff)
+{
+ size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
+ u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
+ int rc;
+
+ /* Zero-length images and AFEX profiles do not have CRC */
+ if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
+ return 0;
+
+ rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "image %x has failed crc test (rc %d)\n", type, rc);
+
+ return rc;
+}
+
+static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
+{
+ int rc;
+ struct code_entry entry;
+
+ rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ return bnx2x_test_nvram_dir(bp, &entry, buff);
+}
+
+static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
+ struct code_entry entry;
+ int i;
+
+ rc = bnx2x_nvram_read32(bp,
+ dir_offset +
+ sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
+ (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
+ return 0;
+
+ rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
+ &cnt, sizeof(u32));
+ if (rc)
+ return rc;
+
+ dir_offset = entry.nvm_start_addr + 8;
+
+ for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 rc, dir_offset = NVRAM_DIR_OFFSET;
+ int i;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
+
+ for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return bnx2x_test_nvram_ext_dirs(bp, buff);
+}
+
+struct crc_pair {
+ int offset;
+ int size;
+};
+
+static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
+ const struct crc_pair *nvram_tbl, u8 *buf)
+{
+ int i;
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+ int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
+ nvram_tbl[i].size, buf);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] has failed crc test (rc %d)\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
static int bnx2x_test_nvram(struct bnx2x *bp)
{
- static const struct {
- int offset;
- int size;
- } nvram_tbl[] = {
+ const struct crc_pair nvram_tbl[] = {
{ 0, 0x14 }, /* bootstrap */
{ 0x14, 0xec }, /* dir */
{ 0x100, 0x350 }, /* manuf_info */
@@ -2542,30 +2768,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- __be32 *buf;
- u8 *data;
- int i, rc;
- u32 magic, crc;
+ const struct crc_pair nvram_tbl2[] = {
+ { 0x7e8, 0x350 }, /* manuf_info2 */
+ { 0xb38, 0xf0 }, /* feature_info */
+ { 0, 0 }
+ };
+
+ u8 *buf;
+ int rc;
+ u32 magic;
if (BP_NOMCP(bp))
return 0;
- buf = kmalloc(0x350, GFP_KERNEL);
+ buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
if (!buf) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
rc = -ENOMEM;
goto test_nvram_exit;
}
- data = (u8 *)buf;
- rc = bnx2x_nvram_read(bp, 0, data, 4);
+ rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
if (rc) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"magic value read (rc %d)\n", rc);
goto test_nvram_exit;
}
- magic = be32_to_cpu(buf[0]);
if (magic != 0x669955aa) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"wrong magic value (0x%08x)\n", magic);
@@ -2573,25 +2802,26 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
goto test_nvram_exit;
}
- for (i = 0; nvram_tbl[i].size; i++) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
+ if (rc)
+ goto test_nvram_exit;
- rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
- nvram_tbl[i].size);
- if (rc) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] read data (rc %d)\n", i, rc);
- goto test_nvram_exit;
- }
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
+ u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_HIDE_PORT1;
- crc = ether_crc_le(nvram_tbl[i].size, data);
- if (crc != CRC32_RESIDUAL) {
+ if (!hide) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
- rc = -ENODEV;
- goto test_nvram_exit;
+ "Port 1 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
+ if (rc)
+ goto test_nvram_exit;
}
}
+ rc = bnx2x_test_nvram_dirs(bp, buf);
+
test_nvram_exit:
kfree(buf);
return rc;
@@ -3232,7 +3462,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ if (IS_PF(bp))
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ else /* vf */
+ SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
}
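The VF table above deliberately omits the PF-only callbacks (NVRAM access,
register and EEPROM dumps, self-test, WoL). SET_ETHTOOL_OPS itself is, at
this kernel vintage, nothing more than an assignment macro, roughly (quoted
from memory, see <linux/netdevice.h>):

	#define SET_ETHTOOL_OPS(netdev, ops) \
		((netdev)->ethtool_ops = (ops))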
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c9..84aecdf06f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -389,4 +386,8 @@
#define UNDEF_IRO 0x80000000
+/* used for defining the number of FCoE tasks supported per PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
+
#endif /* BNX2X_FW_DEFS_H */
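All of the renumbered macros above share one shape: an IRO entry supplies a
base offset plus per-index strides. A simplified standalone model of the
arithmetic (struct iro_model and iro_offset are hypothetical; the real IRO[]
table is generated with the firmware and indexed by the macros):

	struct iro_model {
		u32 base;	/* base offset in storm RAM */
		u16 m1;		/* stride per pfId/funcId */
		u16 m2;		/* stride per sub-object, e.g. iscsiEqId */
	};

	static u32 iro_offset(const struct iro_model *e, u32 pf_id, u32 sub_id)
	{
		/* e.g. CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) is
		 * IRO[310].base + pfId*IRO[310].m1 + iscsiEqId*IRO[310].m2
		 */
		return e->base + pf_id * e->m1 + sub_id * e->m2;
	}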
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc34..12f00a40cdf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2.
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
@@ -2821,8 +2840,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
*/
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
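A hedged sketch of how a consumer selects the union view; validity is
signalled by the TUNNEL_EXIST bit in the start BD (see
ETH_TX_START_BD_TUNNEL_EXIST below), and fill_parse_bd_e2 with its
arguments is hypothetical:

	static void fill_parse_bd_e2(struct eth_tx_parse_bd_e2 *pbd,
				     bool tunnel_exist, __le16 pseudo_csum,
				     u8 inner_ip_off_w, __le16 mac_hi,
				     __le16 mac_mid, __le16 mac_lo)
	{
		if (tunnel_exist) {
			/* tunnel view: inner checksum bookkeeping */
			pbd->data.tunnel_data.pseudo_csum = pseudo_csum;
			pbd->data.tunnel_data.ip_hdr_start_inner_w =
				inner_ip_off_w;
		} else {
			/* otherwise the same bytes carry the dest MAC */
			pbd->data.mac_addr.dst_hi = mac_hi;
			pbd->data.mac_addr.dst_mid = mac_mid;
			pbd->data.mac_addr.dst_lo = mac_lo;
		}
	}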
+
+/* Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over IPv4 tunnel, whether to increment
+ * IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case tunnel exist and L4 checksum offload,
+ * the pseudo checksum location, on packet or on BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
/*
* Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
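The new bitfields follow the header's usual MASK/SHIFT pairing. A hedged
helper showing the read-modify-write idiom for one field of the second
parsing BD (set_outer_ip_hdr_len_w is hypothetical):

	static void set_outer_ip_hdr_len_w(struct eth_tx_parse_2nd_bd *bd,
					   u16 len_w)
	{
		u16 gd = le16_to_cpu(bd->global_data);

		gd &= ~ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W;
		gd |= (len_w <<
		       ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) &
		      ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W;
		bd->global_data = cpu_to_le16(gd);
	}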
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5061,6 +5216,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
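Since err_id in malicious_vf_event_data (above) now carries one of these
values, a PF-side event handler can decode it for logging;
malicious_vf_err_str is a hypothetical helper, not part of the patch:

	static const char *malicious_vf_err_str(u8 err_id)
	{
		switch (err_id) {
		case VF_PF_CHANNEL_NOT_READY:
			return "vf-pf channel not ready";
		case ETH_ILLEGAL_BD_LENGTHS:
			return "illegal BD lengths";
		case ETH_PACKET_TOO_SHORT:
			return "packet too short";
		case ETH_TOO_MANY_BDS:
			return "too many BDs";
		default:
			return "unknown error";
		}
	}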
/*
* Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 0283f343b0d..9d64b988ab3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
int rc = 0;
struct bnx2x *bp = params->bp;
- if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
- DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
- return -EINVAL;
- }
-
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
xfer_cnt);
@@ -3426,13 +3426,19 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ switch (params->req_fc_auto_adv) {
+ case BNX2X_FLOW_CTRL_BOTH:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- else
+ break;
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_TX:
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+ default:
+ break;
+ }
break;
-
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
@@ -3629,6 +3635,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
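The two macros fold the shift-and-or expressions previously spelled out at
every call site; for instance, the first converted call below packs its
three coefficients as:

	/* WC_TX_DRIVER(0x02, 0x06, 0x09) expands to
	 *   (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
	 *   (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET)     |
	 *   (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)
	 */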
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -3728,7 +3744,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3769,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3909,6 +3918,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3963,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ /* TAP values are taken from NVRAM if the value stored there is non-zero */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4126,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4767,8 @@ void bnx2x_link_status_update(struct link_params *params,
port_mb[port].link_status));
/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
- if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
- bp->link_params.loopback_mode != LOOPBACK_EXT)
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
vars->link_status |= LINK_STATUS_LINK_UP;
if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7775,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -7771,7 +7789,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | (dev_addr << 8)));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7863,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
+ u8 dev_addr,
u16 addr, u8 byte_cnt,
u8 *o_buf, u8 is_init)
{
@@ -7869,7 +7888,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7885,7 +7904,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -7896,6 +7916,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+ /* Set the 2-wire transfer rate of the SFP+ module EEPROM
+ * to 100kHz, since some DACs (direct attach cables) do
+ * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -7968,26 +7997,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
{
- int rc = -EOPNOTSUPP;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
- rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf, 0);
- break;
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
}
return rc;
}
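With the chunking in place, callers may request more than one
SFP_EEPROM_PAGE_SIZE (16-byte) page per call. A hedged usage sketch,
assuming phy and params are in scope and reusing the vendor-name constants
from bnx2x_link.h:

	u8 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	if (!bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
					  SFP_EEPROM_VENDOR_NAME_ADDR,
					  SFP_EEPROM_VENDOR_NAME_SIZE,
					  vendor_name))
		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';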
@@ -8004,6 +8051,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_CON_TYPE_ADDR,
2,
(u8 *)val) != 0) {
@@ -8021,6 +8069,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*/
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) != 0) {
@@ -8049,20 +8098,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8154,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
@@ -8167,6 +8221,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
@@ -8175,6 +8230,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -8205,12 +8261,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
- params, 1,
- 1, &val, 1);
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
- &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
@@ -8219,7 +8276,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
}
usleep_range(5000, 10000);
}
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
return rc;
}
@@ -8376,15 +8434,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9577,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < ARRAY_SIZE(reg_set);
- i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -10281,7 +10329,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -12242,7 +12291,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12310,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12570,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->req_line_speed[0], params->req_flow_ctrl[0]);
DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -13440,8 +13490,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
int sigdet;
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
- * since some switches tend to reinit the AN process and clear the
- * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * Since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
@@ -13469,8 +13519,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
return;
}
@@ -13486,7 +13538,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
- next_page);
+ next_page);
bnx2x_kr2_recovery(params, vars, phy);
}
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c..4df45234fdc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
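The new dev_addr argument opens up the SFF-8472 A2 diagnostics page. A
sketch under the assumption that a module advertising SFF-8472 compliance
implements A2 (phy and params in scope, error handling elided):

	u8 sff8472_comp, cc_dmi;

	if (bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
					 SFP_EEPROM_SFF_8472_COMP_ADDR,
					 SFP_EEPROM_SFF_8472_COMP_SIZE,
					 &sff8472_comp) == 0 &&
	    sff8472_comp)
		bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A2,
					     SFP_EEPROM_A2_CC_DMI_ADDR,
					     1, &cc_dmi);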
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c50696b396f..b4c9dea93a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
#ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field in ether_stat struct. The base address is offset by 2
+ * bytes to account for the field being 8 bytes but a mac address is
+ * only 6 bytes. Likewise, the stride passed to get_n_elements is
+ * 2 bytes, bridging the gap between the 6 bytes of a mac and the
+ * 8 bytes each entry of the ether_stat struct allocates, so the
+ * macs will land in their proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
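A hedged model of the padding arithmetic described in the comment above:
each mac_local[] entry is 8 bytes and a MAC is ETH_ALEN (6), so MAC_PAD
works out to 2 and every MAC lands left-padded in its slot (place_mac is
hypothetical):

	static void place_mac(u8 *entry8, const u8 *mac6)
	{
		memset(entry8, 0, MAC_PAD);		  /* 2 zero bytes */
		memcpy(entry8 + MAC_PAD, mac6, ETH_ALEN); /* bytes 2..7 */
	}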
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6018,10 +6030,11 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
mmiowb();
}
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
int i;
+ /* Set up NIC internals and enable interrupts */
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
@@ -6030,17 +6043,26 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
return;
+ }
- /* Initialize MOD_ABS interrupts */
- bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
- bp->common.shmem_base, bp->common.shmem2_base,
- BP_PORT(bp));
+ if (IS_PF(bp)) {
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base,
+ bp->common.shmem2_base, BP_PORT(bp));
- bnx2x_init_def_sb(bp);
- bnx2x_update_dsb_idx(bp);
- bnx2x_init_sp_ring(bp);
+ /* initialize the default status block and sp ring */
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_sp_ring(bp);
+ }
+}
+
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+{
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
@@ -6058,12 +6080,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
AEU_INPUTS_ATTN_BITS_SPIO5);
}
-/* end of nic init */
-
-/*
- * gzip service functions
- */
-
+/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@ -7757,6 +7774,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+
bnx2x_iov_free_mem(bp);
}
@@ -7773,7 +7792,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
sizeof(struct
host_hc_status_block_e1x));
- if (CONFIGURE_NIC_MODE(bp))
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
/* allocate searcher T2 table, as it wasn't allocated before */
BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -7796,7 +7815,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp))
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
/* allocate searcher T2 table */
BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -7917,8 +7936,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- unsigned long ramrod_flags = 0;
-
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@ -7926,12 +7943,18 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
return 0;
}
- DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- /* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
- set, BNX2X_ETH_MAC, &ramrod_flags);
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+ &bp->sp_objs->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+ } else { /* vf */
+ return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+ bp->fp->index, true);
+ }
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -9525,6 +9548,10 @@ sp_rtnl_not_reset:
bnx2x_vfpf_storm_rx_mode(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9532,8 +9559,10 @@ sp_rtnl_not_reset:
/* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state))
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp);
+ }
}
static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9730,31 @@ static struct bnx2x_prev_path_list *
return NULL;
}
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9763,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
if (down_trylock(&bnx2x_prev_sem))
return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- bp->pdev->bus->number == tmp_list->bus &&
- BP_PATH(bp) == tmp_list->path) {
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
rc = true;
BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
BP_PATH(bp));
- break;
}
}
@@ -9730,6 +9785,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
struct bnx2x_prev_path_list *tmp_list;
int rc;
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9816,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9824,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
BNX2X_ERR("Received %d when tried to take lock\n", rc);
kfree(tmp_list);
} else {
- BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- BP_PATH(bp));
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
list_add(&tmp_list->list, &bnx2x_prev_list);
up(&bnx2x_prev_sem);
}
@@ -9990,6 +10068,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
}
do {
+ int aer = 0;
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
@@ -9998,7 +10077,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
break;
}
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
rc = bnx2x_prev_unload_common(bp);
break;
}
@@ -10038,8 +10128,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access >=0x400 from
+ * the configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
@@ -10575,10 +10669,12 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->link_params.speed_cap_mask[0] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask);
+ dev_info.port_hw_config[port].speed_capability_mask) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->link_params.speed_cap_mask[1] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask2);
+ dev_info.port_hw_config[port].speed_capability_mask2) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->port.link_config[0] =
SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
@@ -10703,6 +10799,12 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+ /* Calculate the number of maximum allowed FCoE tasks */
+ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+ if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+ bp->cnic_eth_dev.max_fcoe_exchanges /=
+ MAX_FCOE_FUNCS_PER_ENGINE;
+
/* Read the WWN: */
if (!IS_MF(bp)) {
/* Port info */
@@ -10816,14 +10918,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- /* use FIP MAC as primary MAC */
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11060,6 +11160,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11406,26 +11509,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm failes) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held).
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
- return 0;
-}
-
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11795,6 +11878,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_setup_tc = bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11957,19 +12042,26 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
- NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA;
@@ -12451,7 +12543,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
* l2 connections.
*/
if (IS_VF(bp)) {
- bnx2x_vf_map_doorbells(bp);
+ bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
goto init_one_exit;
@@ -12479,13 +12571,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
- /* Enable SRIOV if capability found in configuration space.
- * Once the generic SR-IOV framework makes it in from the
- * pci tree this will be revised, to allow dynamic control
- * over the number of VFs. Right now, change the num of vfs
- * param below to enable SR-IOV.
- */
- rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
goto init_one_exit;
@@ -12497,16 +12584,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -12640,9 +12717,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
- int i;
-
- bp->state = BNX2X_STATE_ERROR;
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@ -12651,29 +12726,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
-
- bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
+ netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
-
- for_each_rx_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
- bnx2x_free_mem(bp);
+ spin_lock_bh(&bp->stats_lock);
+ bp->stats_state = STATS_STATE_DISABLED;
+ spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED;
+ bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
@@ -12709,6 +12776,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ BNX2X_ERR("IO error detected\n");
+
netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) {
@@ -12719,6 +12788,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev))
bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp);
+
pci_disable_device(pdev);
rtnl_unlock();
@@ -12737,9 +12808,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ int i;
rtnl_lock();
-
+ BNX2X_ERR("IO slot reset initializing...\n");
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
@@ -12749,10 +12821,47 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+ /* We should have reset the engine, so it's fair to
+ * assume the FW will no longer write to the bnx2x driver.
+ */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
rtnl_unlock();
return PCI_ERS_RESULT_RECOVERED;
@@ -12779,6 +12888,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -12801,6 +12913,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
};
static int __init bnx2x_init(void)
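The three EEH hunks above rework the callbacks reached through the bnx2x_err_handler table wired into bnx2x_pci_driver. A minimal sketch of that table (struct pci_error_handlers is the standard PCI core type; the comments only paraphrase the hunks above):

static const struct pci_error_handlers bnx2x_err_handler = {
	.error_detected	= bnx2x_io_error_detected, /* detach netdev, unload nic, mark path */
	.slot_reset	= bnx2x_io_slot_reset,	   /* re-enable device, drain and free resources */
	.resume		= bnx2x_io_resume,	   /* recover fw_seq from shmem, reload if running */
};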
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d5301..d22bc40091e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90..32a9609cc98 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
}
}
return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
-
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.
+ vlan_mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long as we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
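The generalized get_n_elements() above replaces the MAC-only copy (with its MAC_LEADING_ZERO_CNT padding) by a stride/size walk. A stand-alone model of the copy pattern, with illustrative names (the driver iterates a linked list rather than an array):

#include <string.h>

/* Copy `size` bytes per element and skip `stride` pad bytes before the
 * next slot; stride = 0 packs the output densely (as the VF-config
 * callers do), while stride = MAC_PAD would reproduce the old padded
 * layout.
 */
static int copy_elements(const unsigned char *src, int count,
			 unsigned char *base, unsigned char stride,
			 unsigned char size)
{
	unsigned char *next = base;
	int i;

	for (i = 0; i < count; i++) {
		memcpy(next, src + (size_t)i * size, size);
		next += stride + size;
	}
	return i;
}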
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9f..43c00bc84a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
/* Queue type options: queue type may be a combination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa209358..2ce7c747136 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
- list_del(&pos->link);
- list_add(&pos->link, &rollback_list);
+ list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
@@ -958,6 +959,12 @@ op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
+ vf->cfg_flags |= VF_CFG_VLAN;
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev);
unknown_dev:
- BNX2X_ERR("Unknown device\n");
return false;
}
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SRIOV can be enabled only with MSIX */
if (int_mode_param == BNX2X_INT_MODE_MSI ||
- int_mode_param == BNX2X_INT_MODE_INTX)
+ int_mode_param == BNX2X_INT_MODE_INTX) {
BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
err = -EIO;
/* verify ari is enabled */
if (!bnx2x_ari_enabled(bp->pdev)) {
- BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
- return err;
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
}
/* verify igu is in normal mode */
if (CHIP_INT_MODE_IS_BC(bp)) {
BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
- return err;
+ return 0;
}
/* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0)
goto failed;
- /* calculate the actual number of VFs */
- iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
- abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2436,8 +2446,8 @@ get_vf:
/* Do nothing for now */
break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
- vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
break;
}
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE;
}
-void bnx2x_enable_sriov(struct bnx2x *bp)
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
- int rc = 0;
- /* disbale sriov in case it is still enabled */
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+}
+
+static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf *vf)
+{
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ if (!vf) {
+ BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ int rc;
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
+ return rc;
+ if (!mac_obj || !vlan_obj || !bulletin) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->spoofchk = 1; /* always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
+ 0, VLAN_HLEN);
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+ /* mac configured by ndo so its in bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ /* vlan configured by ndo so its in bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
}
/* New mac for VF. Consider these cases:
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
+ * again after consulting its bulletin board.
*/
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
+ int rc, q_logical_state;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
}
/* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
+ BNX2X_ETH_MAC, &ramrod_flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return rc;
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* update PF's copy of the VF's bulletin. No point in posting the vlan
+ * to the VF since it doesn't have anything to do with it. But it is
+ * useful to store it here in case the VF is not up yet and we can
+ * only configure the vlan later when it comes up.
+ */
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the vlan in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ unsigned long vlan_mac_flags = 0;
+ struct bnx2x_vlan_mac_obj *vlan_obj =
+ &bnx2x_vfq(vf, 0, vlan_obj);
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ return -EINVAL;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure the new vlan to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ return rc;
+ }
+
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the vlan before the vf was
+ * up, and we were called because the VF came up later)
+ */
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ }
+ return 0;
}
/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
return PFVF_BULLETIN_UPDATED;
}
-void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
/* vf doorbells are embedded within the regview */
- bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+ return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
+ mutex_init(&bp->vf2pf_mutex);
+
/* allocate vf2pf mailbox for vf to pf channel */
BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
+
+int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invoke
+ * register_netdevice which must have rtnl lock taken. As we are holding
+ * the lock right now, that could only work if the probe would not take
+ * the lock. However, as the probe of the vf may be called from other
+ * contexts as well (such as when passthrough to a vm fails) it can't assume
+ * the lock is being held for it. Using delayed work here allows the
+ * probe code to simply take the lock (i.e. wait for it to be released
+ * if it is being held). We only want to do this if the number of VFs
+ * was set before PF driver was loaded.
+ */
+ if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
+
+ return 0;
+}
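bnx2x_sriov_configure() above implements the standard .sriov_configure hook, so the VF count can now be driven at runtime (typically through the PCI core's sriov_numvfs sysfs attribute) instead of a fixed probe-time parameter. A stand-alone sketch of its policy, with illustrative names:

#include <stdio.h>

/* 0 disables SR-IOV; requests beyond the device's total_vfs are
 * truncated rather than rejected.
 */
static int configure_vfs(int requested, int total_vfs)
{
	if (requested > total_vfs) {
		fprintf(stderr, "truncating %d VFs to maximum allowed %d\n",
			requested, total_vfs);
		requested = total_vfs;
	}
	if (requested == 0)
		return 0;	/* maps to pci_disable_sriov() */
	return requested;	/* maps to pci_enable_sriov() */
}

int main(void)
{
	printf("%d\n", configure_vfs(128, 64));	/* prints 64 */
	printf("%d\n", configure_vfs(0, 64));	/* prints 0 */
	return 0;
}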
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add..d67ddc554c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
#define VF_CFG_TPA 0x0004
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
u16 length);
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length);
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
bool bnx2x_tlv_supported(u16 tlvtype);
@@ -731,7 +733,7 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
-int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
}
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
-void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
-void bnx2x_enable_sriov(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
-static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -787,7 +794,8 @@ static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
-static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+ u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
@@ -802,8 +810,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
return PFVF_BULLETIN_UNCHANGED;
}
-static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2..2ca3d94fcec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
}
}
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
void bnx2x_stats_init(struct bnx2x *bp)
{
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
- int i;
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
&(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
}
- /* function stats */
- for_each_queue(bp, i) {
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
-
- memset(&fp_stats->old_tclient, 0,
- sizeof(fp_stats->old_tclient));
- memset(&fp_stats->old_uclient, 0,
- sizeof(fp_stats->old_uclient));
- memset(&fp_stats->old_xclient, 0,
- sizeof(fp_stats->old_xclient));
- if (bp->stats_init) {
- memset(&fp_stats->eth_q_stats, 0,
- sizeof(fp_stats->eth_q_stats));
- memset(&fp_stats->eth_q_stats_old, 0,
- sizeof(fp_stats->eth_q_stats_old));
- }
- }
-
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ /* Clean SP from previous statistics */
if (bp->stats_init) {
- memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
- memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
- memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
- memset(&bp->func_stats, 0, sizeof(bp->func_stats));
-
- /* Clean SP from previous statistics */
if (bp->func_stx) {
memset(bnx2x_sp(bp, func_stats), 0,
sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
}
}
- bp->stats_state = STATS_STATE_DISABLED;
-
- if (bp->port.pmf && bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
-
- /* mark the end of statistics initializiation */
- bp->stats_init = false;
+ bnx2x_memset_stats(bp);
}
void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad..d117f472816 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
/* forward */
struct bnx2x;
+void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
-
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d6..928b074d7d8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length)
{
+ mutex_lock(&bp->vf2pf_mutex);
+
DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
type);
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
+/* releases the mailbox */
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF timeout */
if (rc)
- return rc;
+ goto out;
/* copy acquire response from buffer to bp */
memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF reports error */
BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
bp->acquire_resp.hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
}
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->acquire_resp.resc.current_mac_addr,
ETH_ALEN);
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
int bnx2x_vfpf_release(struct bnx2x *bp)
{
struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- u32 rc = 0, vf_id;
+ u32 rc, vf_id;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vf_id = vf_id;
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
if (rc)
/* PF timeout */
- return rc;
+ goto out;
+
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
/* PF released us */
DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
/* PF reports error */
BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc)
- return rc;
+ goto out;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
}
/* CLOSE VF - opposite to INIT_VF */
@@ -380,6 +406,9 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
for_each_queue(bp, i)
bnx2x_vfpf_teardown_queue(bp, i);
+ /* remove mac */
+ bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
+
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
@@ -401,6 +430,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
resp->hdr.status);
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
free_irq:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 0);
@@ -435,7 +466,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
- flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
flags |= VFPF_QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -486,8 +516,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
fp_idx, resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -515,41 +548,46 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
if (rc) {
BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
rc);
- return rc;
+ goto out;
}
/* PF failed the transaction */
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int rc = 0;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
sizeof(*req));
req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
- req->vf_qid = 0;
+ req->vf_qid = vf_qid;
req->n_mac_vlan_filters = 1;
- req->filters[0].flags =
- VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
+ if (set)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
/* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp);
/* copy mac from device to request */
- memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(req->filters[0].mac, addr, ETH_ALEN);
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
@@ -562,7 +600,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
- return rc;
+ goto out;
}
/* failure may mean PF was configured with a new mac for us */
@@ -570,6 +608,9 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
DP(BNX2X_MSG_IOV,
"vfpf SET MAC failed. Check bulletin board for new posts\n");
+ /* copy mac from bulletin to device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
/* copy mac from device to request */
@@ -587,8 +628,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -643,14 +686,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("Sending a message failed: %d\n", rc);
- return rc;
+ goto out;
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -689,7 +734,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
break;
default:
BNX2X_ERR("BAD rx mode (%d)\n", mode);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +754,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
@@ -1004,7 +1052,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
/* convert MBX queue-flags to standard SP queue-flags */
-static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
unsigned long *sp_q_flags)
{
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1063,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
- if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
- __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1071,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+
+ /* outer vlan removal is set according to the PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1125,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
init_p->tx.hc_rate = setup_q->txq.hc_rate;
init_p->tx.sb_cq_index = setup_q->txq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&init_p->tx.flags);
/* tx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&setup_p->flags);
/* tx setup - general, nothing */
@@ -1107,11 +1157,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* rx init */
init_p->rx.hc_rate = setup_q->rxq.hc_rate;
init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&init_p->rx.flags);
/* rx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&setup_p->flags);
/* rx setup - general */
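The recurring goto-out rewrites in this file all serve the new mailbox mutex: bnx2x_vfpf_prep() now takes bp->vf2pf_mutex and bnx2x_vfpf_finalize() releases it, so every request path must funnel through a single exit point instead of returning early. A user-space analogue of the pattern, assuming illustrative names:

#include <pthread.h>

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

static void request_prep(void)	   { pthread_mutex_lock(&mbox_lock); }
static void request_finalize(void) { pthread_mutex_unlock(&mbox_lock); }

static int send_request(int fail_early)
{
	int rc = 0;

	request_prep();		/* lock taken here ... */
	if (fail_early) {
		rc = -1;
		goto out;	/* ... so no early return past this point */
	}
	/* talk to the PF here */
out:
	request_finalize();	/* single unlock point */
	return rc;
}

int main(void)
{
	send_request(0);	/* success path releases the lock */
	send_request(1);	/* error path releases it too */
	return 0;
}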
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00..41708faab57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
u8 mac[ETH_ALEN];
- u8 padding[2];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
};
union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 149a3a03849..40649a8bf39 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5544,8 +5544,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (CNIC_SUPPORTS_FCOE(cp))
+ if (CNIC_SUPPORTS_FCOE(cp)) {
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+ cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
+ }
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0c9367a0f57..ec9bb9ad4bb 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -195,6 +195,7 @@ struct cnic_eth_dev {
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 fcoe_init_cid;
+ u32 max_fcoe_exchanges;
u32 fcoe_wwn_port_name_hi;
u32 fcoe_wwn_port_name_lo;
u32 fcoe_wwn_node_name_hi;
@@ -313,6 +314,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ int max_fcoe_exchanges;
+
union drv_info_to_mcp *stats_addr;
struct fcoe_capabilities *fcoe_cap;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375c..e80bfb60c3e 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
- if (sb_new == NULL) {
- pr_info("%s: sk_buff allocation failed\n",
- d->sbdma_eth->sbm_dev->name);
+ if (sb_new == NULL)
return -ENOBUFS;
- }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba..728d42ab2a7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 130
+#define TG3_MIN_NUM 131
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "February 14, 2013"
+#define DRV_MODULE_RELDATE "April 09, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -1873,6 +1874,20 @@ static void tg3_link_report(struct tg3 *tp)
tp->link_up = netif_carrier_ok(tp->dev);
}
+static u32 tg3_decode_flowctrl_1000T(u32 adv)
+{
+ u32 flowctrl = 0;
+
+ if (adv & ADVERTISE_PAUSE_CAP) {
+ flowctrl |= FLOW_CTRL_RX;
+ if (!(adv & ADVERTISE_PAUSE_ASYM))
+ flowctrl |= FLOW_CTRL_TX;
+ } else if (adv & ADVERTISE_PAUSE_ASYM)
+ flowctrl |= FLOW_CTRL_TX;
+
+ return flowctrl;
+}
+
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1889,6 +1904,20 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
return miireg;
}
+static u32 tg3_decode_flowctrl_1000X(u32 adv)
+{
+ u32 flowctrl = 0;
+
+ if (adv & ADVERTISE_1000XPAUSE) {
+ flowctrl |= FLOW_CTRL_RX;
+ if (!(adv & ADVERTISE_1000XPSE_ASYM))
+ flowctrl |= FLOW_CTRL_TX;
+ } else if (adv & ADVERTISE_1000XPSE_ASYM)
+ flowctrl |= FLOW_CTRL_TX;
+
+ return flowctrl;
+}
+
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
@@ -2199,7 +2228,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
-static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
u32 phy;
@@ -2291,7 +2320,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
u32 val;
@@ -2301,7 +2330,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tp->setlpicnt = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
- current_link_up == 1 &&
+ current_link_up &&
tp->link_config.active_duplex == DUPLEX_FULL &&
(tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_1000)) {
@@ -2323,7 +2352,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
}
if (!tp->setlpicnt) {
- if (current_link_up == 1 &&
+ if (current_link_up &&
!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
tg3_phy_toggle_auxctl_smdsp(tp, false);
@@ -2530,6 +2559,13 @@ static void tg3_carrier_off(struct tg3 *tp)
tp->link_up = false;
}
+static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
+{
+ if (tg3_flag(tp, ENABLE_ASF))
+ netdev_warn(tp->dev,
+ "Management side-band traffic will be interrupted during phy settings change\n");
+}
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -2669,7 +2705,7 @@ out:
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
tg3_phydsp_write(tp, 0xffb, 0x4000);
- tg3_phy_toggle_automdix(tp, 1);
+ tg3_phy_toggle_automdix(tp, true);
tg3_phy_set_wirespeed(tp);
return 0;
}
@@ -2925,6 +2961,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
+ if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
+ return;
+
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
@@ -3448,11 +3487,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
#define TX_CPU_SCRATCH_SIZE 0x04000
/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
int i;
+ const int iters = 10000;
+
+ for (i = 0; i < iters; i++) {
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+
+ return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+ return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+}
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+ tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ int rc;
+
+ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3546,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
return 0;
}
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
+ if (cpu_base == RX_CPU_BASE) {
+ rc = tg3_rxcpu_pause(tp);
} else {
/*
* There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3556,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
if (tg3_flag(tp, IS_SSB_CORE))
return 0;
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
+ rc = tg3_txcpu_pause(tp);
}
- if (i >= 10000) {
+ if (rc) {
netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -3499,19 +3571,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
return 0;
}
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
+static int tg3_fw_data_len(struct tg3 *tp,
+ const struct tg3_firmware_hdr *fw_hdr)
+{
+ int fw_len;
+
+ /* Non-fragmented firmware has one firmware header followed by a
+ * contiguous chunk of data to be written. The length field in that
+ * header is not the length of data to be written but the complete
+ * length of the bss. The data length is determined based on
+ * tp->fw->size minus headers.
+ *
+ * Fragmented firmware has a main header followed by multiple
+ * fragments. Each fragment is identical to non-fragmented firmware
+ * with a firmware header followed by a contiguous chunk of data. In
+ * the main header, the length field is unused and set to 0xffffffff.
+ * In each fragment header the length is the entire size of that
+ * fragment i.e. fragment data + header length. Data length is
+ * therefore length field in the header minus TG3_FW_HDR_LEN.
+ */
+ if (tp->fw_len == 0xffffffff)
+ fw_len = be32_to_cpu(fw_hdr->len);
+ else
+ fw_len = tp->fw->size;
+
+ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
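+
+/* The two layouts handled above, sketched; tg3_firmware_hdr and
+ * TG3_FW_HDR_LEN are from tg3.h, the diagram itself is only
+ * illustrative:
+ *
+ *  non-fragmented:              fragmented (57766):
+ *  +------------------+        +------------------+  main header,
+ *  | tg3_firmware_hdr |        | tg3_firmware_hdr |  len == 0xffffffff
+ *  +------------------+        +------------------+
+ *  | data             |        | tg3_firmware_hdr |  fragment len =
+ *  | (tp->fw->size    |        | fragment data    |  TG3_FW_HDR_LEN
+ *  |  minus header)   |        +------------------+  + data bytes
+ *  +------------------+        | tg3_firmware_hdr |
+ *                              | fragment data    |
+ *                              +------------------+
+ */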
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
u32 cpu_scratch_base, int cpu_scratch_size,
- struct fw_info *info)
+ const struct tg3_firmware_hdr *fw_hdr)
{
- int err, lock_err, i;
+ int err, i;
void (*write_op)(struct tg3 *, u32, u32);
+ int total_len = tp->fw->size;
if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
@@ -3520,30 +3614,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
return -EINVAL;
}
- if (tg3_flag(tp, 5705_PLUS))
+ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
+ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ int lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE,
+ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+ } else {
+ /* Subtract the additional main header for fragmented firmware and
+ * advance to the first fragment.
+ */
+ total_len -= TG3_FW_HDR_LEN;
+ fw_hdr++;
+ }
+
+ do {
+ u32 *fw_data = (u32 *)(fw_hdr + 1);
+ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+ write_op(tp, cpu_scratch_base +
+ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+ (i * sizeof(u32)),
+ be32_to_cpu(fw_data[i]));
+
+ total_len -= be32_to_cpu(fw_hdr->len);
+
+ /* Advance to next fragment */
+ fw_hdr = (struct tg3_firmware_hdr *)
+ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+ } while (total_len > 0);
err = 0;
@@ -3552,13 +3665,33 @@ out:
}
/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+ int i;
+ const int iters = 5;
+
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, pc);
+
+ for (i = 0; i < iters; i++) {
+ if (tr32(cpu_base + CPU_PC) == pc)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, pc);
+ udelay(1000);
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
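Callers pair the helper above with tg3_resume_cpu() to start a CPU at the firmware entry point; a minimal sketch of that pattern (hypothetical wrapper, tg3-style names assumed):

    /* Sketch: park the CPU at 'entry', then release it. The entry point
     * comes from the firmware header's base_addr field. Propagates the
     * -EBUSY from tg3_pause_cpu_and_set_pc() if the PC never latches.
     */
    static int tg3_start_cpu_at(struct tg3 *tp, u32 cpu_base, u32 entry)
    {
        int err = tg3_pause_cpu_and_set_pc(tp, cpu_base, entry);

        if (err)
            return err;

        tg3_resume_cpu(tp, cpu_base);  /* clears CPU_MODE_HALT; CPU runs */
        return 0;
    }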
+/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
+ const struct tg3_firmware_hdr *fw_hdr;
+ int err;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3566,60 +3699,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
"should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ tr32(RX_CPU_BASE + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ tg3_rxcpu_resume(tp);
return 0;
}
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+ const int iters = 1000;
+ int i;
+ u32 val;
+
+ /* Wait for boot code to complete initialization and enter the service
+ * loop. It is then safe to download service patches.
+ */
+ for (i = 0; i < iters; i++) {
+ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+ break;
+
+ udelay(10);
+ }
+
+ if (i == iters) {
+ netdev_err(tp->dev, "Boot code not ready for service patches\n");
+ return -EBUSY;
+ }
+
+ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+ if (val & 0xff) {
+ netdev_warn(tp->dev,
+ "Other patches exist. Not downloading EEE patch\n");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
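tg3_validate_rxcpu_state() bounds both checks: up to 1000 reads of RX_CPU_HWBKPT waiting for the service-loop magic value, then one read of the handshake word whose low byte flags an already-installed patch. The bounded-poll shape in isolation, as a generic sketch (hypothetical helper, not part of the driver):

    /* Sketch: poll read_fn() until it returns 'want', at most 'iters'
     * attempts spaced 'delay_us' microseconds apart.
     */
    static int tg3_poll_for_value(u32 (*read_fn)(struct tg3 *tp),
                                  struct tg3 *tp, u32 want,
                                  int iters, unsigned int delay_us)
    {
        int i;

        for (i = 0; i < iters; i++) {
            if (read_fn(tp) == want)
                return 0;
            udelay(delay_us);
        }

        return -EBUSY;  /* value never appeared within the budget */
    }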
+/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+ struct tg3_firmware_hdr *fw_hdr;
+
+ if (!tg3_flag(tp, NO_NVRAM))
+ return;
+
+ if (tg3_validate_rxcpu_state(tp))
+ return;
+
+ if (!tp->fw)
+ return;
+
+ /* This firmware blob has a different format than older firmware
+ * releases as given below. The main difference is we have fragmented
+ * data to be written to non-contiguous locations.
+ *
+ * In the beginning we have a firmware header identical to other
+ * firmware which consists of version, base addr and length. The length
+ * here is unused and set to 0xffffffff.
+ *
+ * This is followed by a series of firmware fragments, each of which
+ * is individually identical to previous firmware, i.e. a firmware
+ * header followed by the data for that fragment. The version
+ * field of the individual fragment header is unused.
+ */
+
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+ return;
+
+ if (tg3_rxcpu_pause(tp))
+ return;
+
+ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
+ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+ tg3_rxcpu_resume(tp);
+}
+
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
+ int err;
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
+ if (!tg3_flag(tp, FW_TSO))
return 0;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3627,10 +3817,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
@@ -3643,36 +3830,28 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_scratch_base, cpu_scratch_size,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev,
"%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ __func__, tr32(cpu_base + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
+
+ tg3_resume_cpu(tp, cpu_base);
return 0;
}
/* tp->lock is held. */
-static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
u32 addr_high, addr_low;
int i;
@@ -3735,7 +3914,7 @@ static int tg3_power_up(struct tg3 *tp)
return err;
}
-static int tg3_setup_phy(struct tg3 *, int);
+static int tg3_setup_phy(struct tg3 *, bool);
static int tg3_power_down_prepare(struct tg3 *tp)
{
@@ -3807,7 +3986,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
}
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
@@ -3848,7 +4027,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
mac_mode = MAC_MODE_PORT_MODE_GMII;
- else
+ else if (tp->phy_flags &
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
+ if (tp->link_config.active_speed == SPEED_1000)
+ mac_mode = MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode = MAC_MODE_PORT_MODE_MII;
+ } else
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
@@ -4102,12 +4287,16 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
u32 adv, fc;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
adv = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full;
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
+ adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -4121,6 +4310,15 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tg3_phy_autoneg_cfg(tp, adv, fc);
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
+ /* Normally during power down we want to autonegotiate
+ * the lowest possible speed for WOL. However, to avoid
+ * link flap, we leave it untouched.
+ */
+ return;
+ }
+
tg3_writephy(tp, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART);
} else {
@@ -4177,6 +4375,103 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
}
}
+static int tg3_phy_pull_config(struct tg3 *tp)
+{
+ int err;
+ u32 val;
+
+ err = tg3_readphy(tp, MII_BMCR, &val);
+ if (err)
+ goto done;
+
+ if (!(val & BMCR_ANENABLE)) {
+ tp->link_config.autoneg = AUTONEG_DISABLE;
+ tp->link_config.advertising = 0;
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+ err = -EIO;
+
+ switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
+ case 0:
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ goto done;
+
+ tp->link_config.speed = SPEED_10;
+ break;
+ case BMCR_SPEED100:
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ goto done;
+
+ tp->link_config.speed = SPEED_100;
+ break;
+ case BMCR_SPEED1000:
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ tp->link_config.speed = SPEED_1000;
+ break;
+ }
+ /* Fall through */
+ default:
+ goto done;
+ }
+
+ if (val & BMCR_FULLDPLX)
+ tp->link_config.duplex = DUPLEX_FULL;
+ else
+ tp->link_config.duplex = DUPLEX_HALF;
+
+ tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+ err = 0;
+ goto done;
+ }
+
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tp->link_config.advertising = ADVERTISED_Autoneg;
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ u32 adv;
+
+ err = tg3_readphy(tp, MII_ADVERTISE, &val);
+ if (err)
+ goto done;
+
+ adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
+ tp->link_config.advertising |= adv | ADVERTISED_TP;
+
+ tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
+ } else {
+ tp->link_config.advertising |= ADVERTISED_FIBRE;
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 adv;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ err = tg3_readphy(tp, MII_CTRL1000, &val);
+ if (err)
+ goto done;
+
+ adv = mii_ctrl1000_to_ethtool_adv_t(val);
+ } else {
+ err = tg3_readphy(tp, MII_ADVERTISE, &val);
+ if (err)
+ goto done;
+
+ adv = tg3_decode_flowctrl_1000X(val);
+ tp->link_config.flowctrl = adv;
+
+ val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
+ adv = mii_adv_to_ethtool_adv_x(val);
+ }
+
+ tp->link_config.advertising |= adv;
+ }
+
+done:
+ return err;
+}
+
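For reference, the forced-speed decode that tg3_phy_pull_config() performs on BMCR, shown in isolation (a sketch; the driver additionally rejects combinations that are invalid for serdes or 10/100-only PHYs):

    /* Sketch: BMCR_SPEED100 and BMCR_SPEED1000 encode three valid forced
     * speeds; both bits set is a reserved encoding.
     */
    static int tg3_bmcr_to_speed(u32 bmcr)
    {
        switch (bmcr & (BMCR_SPEED1000 | BMCR_SPEED100)) {
        case 0:
            return SPEED_10;
        case BMCR_SPEED100:
            return SPEED_100;
        case BMCR_SPEED1000:
            return SPEED_1000;
        default:
            return SPEED_UNKNOWN;  /* reserved encoding */
        }
    }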
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
int err;
@@ -4196,6 +4491,32 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
+static bool tg3_phy_eee_config_ok(struct tg3 *tp)
+{
+ u32 val;
+ u32 tgtadv = 0;
+ u32 advertising = tp->link_config.advertising;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ return true;
+
+ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
+ return false;
+
+ val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+
+ if (advertising & ADVERTISED_100baseT_Full)
+ tgtadv |= MDIO_AN_EEE_ADV_100TX;
+ if (advertising & ADVERTISED_1000baseT_Full)
+ tgtadv |= MDIO_AN_EEE_ADV_1000T;
+
+ if (val != tgtadv)
+ return false;
+
+ return true;
+}
+
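tg3_phy_eee_config_ok() reports a mismatch whenever the EEE advertisement programmed into the PHY differs from what the requested link modes imply. The expected-value construction on its own, as a hedged sketch:

    /* Sketch: map ethtool advertising bits to the MDIO EEE advertisement
     * bits the check above expects to read back from the PHY.
     */
    static u32 tg3_expected_eee_adv(u32 advertising)
    {
        u32 eee = 0;

        if (advertising & ADVERTISED_100baseT_Full)
            eee |= MDIO_AN_EEE_ADV_100TX;  /* 100BASE-TX EEE */
        if (advertising & ADVERTISED_1000baseT_Full)
            eee |= MDIO_AN_EEE_ADV_1000T;  /* 1000BASE-T EEE */

        return eee;
    }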
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
u32 advmsk, tgtadv, advertising;
@@ -4262,7 +4583,7 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
return true;
}
-static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
if (curr_link_up != tp->link_up) {
if (curr_link_up) {
@@ -4280,23 +4601,28 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
return false;
}
-static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+static void tg3_clear_mac_status(struct tg3 *tp)
{
- int current_link_up;
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED);
+ udelay(40);
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
+{
+ bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
u16 current_speed;
u8 current_duplex;
int i, err;
- tw32(MAC_EVENT, 0);
-
- tw32_f(MAC_STATUS,
- (MAC_STATUS_SYNC_CHANGED |
- MAC_STATUS_CFG_CHANGED |
- MAC_STATUS_MI_COMPLETION |
- MAC_STATUS_LNKSTATE_CHANGED));
- udelay(40);
+ tg3_clear_mac_status(tp);
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE,
@@ -4316,7 +4642,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
!(bmsr & BMSR_LSTATUS))
- force_reset = 1;
+ force_reset = true;
}
if (force_reset)
tg3_phy_reset(tp);
@@ -4380,7 +4706,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
}
- current_link_up = 0;
+ current_link_up = false;
current_speed = SPEED_UNKNOWN;
current_duplex = DUPLEX_UNKNOWN;
tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
@@ -4439,21 +4765,31 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tp->link_config.active_duplex = current_duplex;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ bool eee_config_ok = tg3_phy_eee_config_ok(tp);
+
if ((bmcr & BMCR_ANENABLE) &&
+ eee_config_ok &&
tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
- current_link_up = 1;
+ current_link_up = true;
+
+ /* EEE settings changes take effect only after a phy
+ * reset. If we have skipped a reset due to Link Flap
+ * Avoidance being enabled, do it now.
+ */
+ if (!eee_config_ok &&
+ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !force_reset)
+ tg3_phy_reset(tp);
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
- tp->link_config.duplex == current_duplex &&
- tp->link_config.flowctrl ==
- tp->link_config.active_flowctrl) {
- current_link_up = 1;
+ tp->link_config.duplex == current_duplex) {
+ current_link_up = true;
}
}
- if (current_link_up == 1 &&
+ if (current_link_up &&
tp->link_config.active_duplex == DUPLEX_FULL) {
u32 reg, bit;
@@ -4473,11 +4809,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
relink:
- if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
if (tg3_flag(tp, ROBOSWITCH)) {
- current_link_up = 1;
+ current_link_up = true;
/* FIXME: when BCM5325 switch is used use 100 MBit/s */
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
@@ -4488,11 +4824,11 @@ relink:
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
- current_link_up = 1;
+ current_link_up = true;
}
tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
- if (current_link_up == 1) {
+ if (current_link_up) {
if (tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_10)
tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -4528,7 +4864,7 @@ relink:
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
if (tg3_asic_rev(tp) == ASIC_REV_5700) {
- if (current_link_up == 1 &&
+ if (current_link_up &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
else
@@ -4559,7 +4895,7 @@ relink:
udelay(40);
if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
- current_link_up == 1 &&
+ current_link_up &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
udelay(120);
@@ -4999,19 +5335,19 @@ static void tg3_init_bcm8002(struct tg3 *tp)
tg3_writephy(tp, 0x10, 0x8011);
}
-static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
u16 flowctrl;
+ bool current_link_up;
u32 sg_dig_ctrl, sg_dig_status;
u32 serdes_cfg, expected_sg_dig_ctrl;
int workaround, port_a;
- int current_link_up;
serdes_cfg = 0;
expected_sg_dig_ctrl = 0;
workaround = 0;
port_a = 1;
- current_link_up = 0;
+ current_link_up = false;
if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
@@ -5042,7 +5378,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
}
if (mac_status & MAC_STATUS_PCS_SYNCED) {
tg3_setup_flow_control(tp, 0, 0);
- current_link_up = 1;
+ current_link_up = true;
}
goto out;
}
@@ -5063,7 +5399,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
MAC_STATUS_RCVD_CFG)) ==
MAC_STATUS_PCS_SYNCED)) {
tp->serdes_counter--;
- current_link_up = 1;
+ current_link_up = true;
goto out;
}
restart_autoneg:
@@ -5098,7 +5434,7 @@ restart_autoneg:
mii_adv_to_ethtool_adv_x(remote_adv);
tg3_setup_flow_control(tp, local_adv, remote_adv);
- current_link_up = 1;
+ current_link_up = true;
tp->serdes_counter = 0;
tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
@@ -5126,7 +5462,7 @@ restart_autoneg:
if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
!(mac_status & MAC_STATUS_RCVD_CFG)) {
tg3_setup_flow_control(tp, 0, 0);
- current_link_up = 1;
+ current_link_up = true;
tp->phy_flags |=
TG3_PHYFLG_PARALLEL_DETECT;
tp->serdes_counter =
@@ -5144,9 +5480,9 @@ out:
return current_link_up;
}
-static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
- int current_link_up = 0;
+ bool current_link_up = false;
if (!(mac_status & MAC_STATUS_PCS_SYNCED))
goto out;
@@ -5173,7 +5509,7 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
tg3_setup_flow_control(tp, local_adv, remote_adv);
- current_link_up = 1;
+ current_link_up = true;
}
for (i = 0; i < 30; i++) {
udelay(20);
@@ -5188,15 +5524,15 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
}
mac_status = tr32(MAC_STATUS);
- if (current_link_up == 0 &&
+ if (!current_link_up &&
(mac_status & MAC_STATUS_PCS_SYNCED) &&
!(mac_status & MAC_STATUS_RCVD_CFG))
- current_link_up = 1;
+ current_link_up = true;
} else {
tg3_setup_flow_control(tp, 0, 0);
/* Forcing 1000FD link up. */
- current_link_up = 1;
+ current_link_up = true;
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
@@ -5209,13 +5545,13 @@ out:
return current_link_up;
}
-static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
u16 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
- int current_link_up;
+ bool current_link_up;
int i;
orig_pause_cfg = tp->link_config.active_flowctrl;
@@ -5252,7 +5588,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = 0;
+ current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -5277,7 +5613,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
mac_status = tr32(MAC_STATUS);
if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
- current_link_up = 0;
+ current_link_up = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
tp->serdes_counter == 0) {
tw32_f(MAC_MODE, (tp->mac_mode |
@@ -5287,7 +5623,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
}
}
- if (current_link_up == 1) {
+ if (current_link_up) {
tp->link_config.active_speed = SPEED_1000;
tp->link_config.active_duplex = DUPLEX_FULL;
tw32(MAC_LED_CTRL, (tp->led_ctrl |
@@ -5312,33 +5648,63 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
return 0;
}
-static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
- int current_link_up, err = 0;
+ int err = 0;
u32 bmsr, bmcr;
- u16 current_speed;
- u8 current_duplex;
- u32 local_adv, remote_adv;
+ u16 current_speed = SPEED_UNKNOWN;
+ u8 current_duplex = DUPLEX_UNKNOWN;
+ bool current_link_up = false;
+ u32 local_adv, remote_adv, sgsr;
+
+ if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) &&
+ !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
+ (sgsr & SERDES_TG3_SGMII_MODE)) {
+
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+
+ if (!(sgsr & SERDES_TG3_LINK_UP)) {
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else {
+ current_link_up = true;
+ if (sgsr & SERDES_TG3_SPEED_1000) {
+ current_speed = SPEED_1000;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else if (sgsr & SERDES_TG3_SPEED_100) {
+ current_speed = SPEED_100;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ } else {
+ current_speed = SPEED_10;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ }
+
+ if (sgsr & SERDES_TG3_FULL_DUPLEX)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+ }
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tg3_clear_mac_status(tp);
+
+ goto fiber_setup_done;
+ }
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
- tw32(MAC_EVENT, 0);
-
- tw32_f(MAC_STATUS,
- (MAC_STATUS_SYNC_CHANGED |
- MAC_STATUS_CFG_CHANGED |
- MAC_STATUS_MI_COMPLETION |
- MAC_STATUS_LNKSTATE_CHANGED));
- udelay(40);
+ tg3_clear_mac_status(tp);
if (force_reset)
tg3_phy_reset(tp);
- current_link_up = 0;
- current_speed = SPEED_UNKNOWN;
- current_duplex = DUPLEX_UNKNOWN;
tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -5424,7 +5790,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
if (bmsr & BMSR_LSTATUS) {
current_speed = SPEED_1000;
- current_link_up = 1;
+ current_link_up = true;
if (bmcr & BMCR_FULLDPLX)
current_duplex = DUPLEX_FULL;
else
@@ -5451,12 +5817,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
- current_link_up = 0;
+ current_link_up = false;
}
}
}
- if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+fiber_setup_done:
+ if (current_link_up && current_duplex == DUPLEX_FULL)
tg3_setup_flow_control(tp, local_adv, remote_adv);
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
@@ -5535,7 +5902,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
}
}
-static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
u32 val;
int err;
@@ -5625,10 +5992,13 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ if (tg3_flag(tp, PTP_CAPABLE)) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ }
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
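With this change the hardware timestamping bits are reported only on PTP-capable chips; userspace sees the result through the standard ETHTOOL_GET_TS_INFO ioctl (what `ethtool -T` uses). A minimal standalone query, as a sketch:

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd;

        if (argc < 2)
            return 1;
        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
            return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&info;
        if (ioctl(fd, SIOCETHTOOL, &ifr)) {
            close(fd);
            return 1;
        }

        /* phc_index stays -1 unless a PTP clock was registered */
        printf("so_timestamping 0x%x, phc_index %d\n",
               info.so_timestamping, info.phc_index);
        close(fd);
        return 0;
    }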
@@ -6348,7 +6718,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (desc->type_flags & RXD_FLAG_VLAN &&
!(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
- __vlan_hwaccel_put_tag(skb,
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
desc->err_vlan & RXD_VLAN_MASK);
napi_gro_receive(&tnapi->napi, skb);
@@ -6436,7 +6806,7 @@ static void tg3_poll_link(struct tg3 *tp)
MAC_STATUS_LNKSTATE_CHANGED));
udelay(40);
} else
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
spin_unlock(&tp->lock);
}
}
@@ -7533,7 +7903,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
u32 val, bmcr, mac_mode, ptest = 0;
tg3_phy_toggle_apd(tp, false);
- tg3_phy_toggle_automdix(tp, 0);
+ tg3_phy_toggle_automdix(tp, false);
if (extlpbk && tg3_phy_set_extloopbk(tp))
return -EIO;
@@ -7641,7 +8011,7 @@ static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
spin_lock_bh(&tp->lock);
tg3_mac_loopback(tp, false);
/* Force link status check */
- tg3_setup_phy(tp, 1);
+ tg3_setup_phy(tp, true);
spin_unlock_bh(&tp->lock);
netdev_info(dev, "Internal MAC loopback mode disabled.\n");
}
@@ -8039,11 +8409,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb)
goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
@@ -8093,12 +8461,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
@@ -8106,11 +8472,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE,
&tnapi->status_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status)
goto err_out;
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
@@ -8157,7 +8522,7 @@ err_out:
/* To stop a block, clear the enable bit and poll till it
* clears. tp->lock is held.
*/
-static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
unsigned int i;
u32 val;
@@ -8201,7 +8566,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
/* tp->lock is held. */
-static int tg3_abort_hw(struct tg3 *tp, int silent)
+static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
int i, err;
@@ -8561,6 +8926,9 @@ static int tg3_chip_reset(struct tg3 *tp)
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
+ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -8572,6 +8940,12 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->last_event_jiffies = jiffies;
if (tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
+ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
+ if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
+ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
}
}
@@ -8582,7 +8956,7 @@ static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
/* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int kind, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
int err;
@@ -8593,7 +8967,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_abort_hw(tp, silent);
err = tg3_chip_reset(tp);
- __tg3_set_mac_addr(tp, 0);
+ __tg3_set_mac_addr(tp, false);
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
@@ -8617,7 +8991,8 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
- int err = 0, skip_mac_1 = 0;
+ int err = 0;
+ bool skip_mac_1 = false;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -8638,7 +9013,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
/* Skip MAC addr 1 if ASF is using it. */
if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
!(addr1_high == 0 && addr1_low == 0))
- skip_mac_1 = 1;
+ skip_mac_1 = true;
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
@@ -9057,7 +9432,7 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
}
/* tp->lock is held. */
-static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
@@ -9106,6 +9481,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
+ if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
+ tg3_phy_pull_config(tp);
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+ }
+
if (reset_phy)
tg3_phy_reset(tp);
@@ -9444,7 +9825,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_rings_reset(tp);
/* Initialize MAC address and backoff seed. */
- __tg3_set_mac_addr(tp, 0);
+ __tg3_set_mac_addr(tp, false);
/* MTU + ethernet header + FCS + optional VLAN tag */
tw32(MAC_RX_MTU_SIZE,
@@ -9781,6 +10162,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ /* Ignore any errors for the firmware download. If download
+ * fails, the device will operate with EEE disabled
+ */
+ tg3_load_57766_firmware(tp);
+ }
+
if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
@@ -9888,7 +10276,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
- err = tg3_setup_phy(tp, 0);
+ err = tg3_setup_phy(tp, false);
if (err)
return err;
@@ -9968,7 +10356,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Called at device open time to get the chip ready for
* packet processing. Invoked with tp->lock held.
*/
-static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
tg3_switch_clocks(tp);
@@ -10229,7 +10617,7 @@ static void tg3_timer(unsigned long __opaque)
phy_event = 1;
if (phy_event)
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
} else if (tg3_flag(tp, POLL_SERDES)) {
u32 mac_stat = tr32(MAC_STATUS);
int need_setup = 0;
@@ -10252,7 +10640,7 @@ static void tg3_timer(unsigned long __opaque)
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
}
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
}
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
@@ -10338,7 +10726,7 @@ static void tg3_timer_stop(struct tg3 *tp)
/* Restart hardware after configuration changes, self-test, etc.
* Invoked with tp->lock held.
*/
-static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
{
@@ -10388,7 +10776,7 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, true);
if (err)
goto out;
@@ -10558,7 +10946,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, true);
tg3_full_unlock(tp);
@@ -10570,7 +10958,7 @@ static int tg3_test_msi(struct tg3 *tp)
static int tg3_request_firmware(struct tg3 *tp)
{
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10966,15 @@ static int tg3_request_firmware(struct tg3 *tp)
return -ENOENT;
}
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
* start address and _full_ length including BSS sections
* (which must be longer than the actual data, of course)
*/
- tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
- if (tp->fw_len < (tp->fw->size - 12)) {
+ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
@@ -10885,7 +11273,15 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ if (err) {
+ netdev_warn(tp->dev, "EEE capability disabled\n");
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+ netdev_warn(tp->dev, "EEE capability restored\n");
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ }
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10910,7 +11306,9 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
- err = tg3_start(tp, true, true, true);
+ err = tg3_start(tp,
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
+ true, true);
if (err) {
tg3_frob_aux_power(tp, false);
pci_set_power_state(tp->pdev, PCI_D3hot);
@@ -11416,8 +11814,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tp->link_config.duplex = cmd->duplex;
}
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
+ tg3_warn_mgmt_link_flap(tp);
+
if (netif_running(dev))
- tg3_setup_phy(tp, 1);
+ tg3_setup_phy(tp, true);
tg3_full_unlock(tp);
@@ -11494,6 +11896,8 @@ static int tg3_nway_reset(struct net_device *dev)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
return -EINVAL;
+ tg3_warn_mgmt_link_flap(tp);
+
if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
@@ -11571,7 +11975,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, false);
if (!err)
tg3_netif_start(tp);
}
@@ -11606,6 +12010,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
struct tg3 *tp = netdev_priv(dev);
int err = 0;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE)
+ tg3_warn_mgmt_link_flap(tp);
+
if (tg3_flag(tp, USE_PHYLIB)) {
u32 newadv;
struct phy_device *phydev;
@@ -11692,7 +12099,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, false);
if (!err)
tg3_netif_start(tp);
}
@@ -11700,6 +12107,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_full_unlock(tp);
}
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
return err;
}
@@ -12760,7 +13169,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
goto done;
}
- err = tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, true);
if (err) {
data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
@@ -12927,7 +13336,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, 1);
+ err2 = tg3_restart_hw(tp, true);
if (!err2)
tg3_netif_start(tp);
}
@@ -13244,7 +13653,8 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
- int err, reset_phy = 0;
+ int err;
+ bool reset_phy = false;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
@@ -13271,7 +13681,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
* breaks all requests to 256 bytes.
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766)
- reset_phy = 1;
+ reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
@@ -13837,6 +14247,12 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
case FLASH_5762_EEPROM_LD:
nvmpinstrp = FLASH_5720_EEPROM_LD;
break;
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ /* This pinstrap supports multiple sizes, so force it
+ * to read the actual size from location 0xf0.
+ */
+ nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
+ break;
}
}
@@ -14289,14 +14705,18 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
- if (tg3_flag(tp, PCI_EXPRESS) &&
- tg3_asic_rev(tp) != ASIC_REV_5785 &&
- !tg3_flag(tp, 57765_PLUS)) {
+ if (tg3_flag(tp, PCI_EXPRESS)) {
u32 cfg3;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
- if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS) &&
+ (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
tg3_flag_set(tp, ASPM_WORKAROUND);
+ if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
+ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
+ if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
+ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
}
if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
@@ -14450,6 +14870,12 @@ static int tg3_phy_probe(struct tg3 *tp)
}
}
+ if (!tg3_flag(tp, ENABLE_ASF) &&
+ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
if (tg3_flag(tp, USE_PHYLIB))
return tg3_phy_init(tp);
@@ -14515,6 +14941,7 @@ static int tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
(tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5762 ||
(tg3_asic_rev(tp) == ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -14524,7 +14951,8 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_phy_init_link_config(tp);
- if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
u32 bmsr, dummy;
@@ -15300,7 +15728,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- tg3_flag_set(tp, TSO_BUG);
+ tg3_flag_set(tp, FW_TSO);
+ tg3_flag_set(tp, TSO_BUG);
if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -15311,7 +15740,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- tp->fw_needed) {
+ tg3_flag(tp, FW_TSO)) {
/* For firmware TSO, assume ASF is disabled.
* We'll disable TSO later if we discover ASF
* is enabled in tg3_get_eeprom_hw_cfg().
@@ -15326,6 +15755,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ tp->fw_needed = FIRMWARE_TG357766;
+
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
@@ -15598,7 +16030,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -15786,6 +16218,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
udelay(50);
tg3_nvram_init(tp);
+ /* If the device has an NVRAM, no need to load patch firmware */
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+ !tg3_flag(tp, NO_NVRAM))
+ tp->fw_needed = NULL;
+
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
@@ -16144,7 +16581,7 @@ out:
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
- int size, int to_device)
+ int size, bool to_device)
{
struct tg3_internal_buffer_desc test_desc;
u32 sram_dma_descs;
@@ -16344,7 +16781,7 @@ static int tg3_test_dma(struct tg3 *tp)
p[i] = i;
/* Send the buffer to the chip. */
- ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
if (ret) {
dev_err(&tp->pdev->dev,
"%s: Buffer write failed. err = %d\n",
@@ -16367,7 +16804,7 @@ static int tg3_test_dma(struct tg3 *tp)
}
#endif
/* Now read it back. */
- ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
if (ret) {
dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
"err = %d\n", __func__, ret);
@@ -16763,7 +17200,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
@@ -17048,7 +17485,7 @@ static int tg3_suspend(struct device *device)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, 1);
+ err2 = tg3_restart_hw(tp, true);
if (err2)
goto out;
@@ -17082,7 +17519,8 @@ static int tg3_resume(struct device *device)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp,
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
if (err)
goto out;
@@ -17098,15 +17536,9 @@ out:
return err;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
-#define TG3_PM_OPS (&tg3_pm_ops)
-
-#else
-
-#define TG3_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
/**
* tg3_io_error_detected - called when PCI error is detected
@@ -17221,7 +17653,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
@@ -17254,7 +17686,7 @@ static struct pci_driver tg3_driver = {
.probe = tg3_init_one,
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
- .driver.pm = TG3_PM_OPS,
+ .driver.pm = &tg3_pm_ops,
};
static int __init tg3_init(void)
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d..9b2d3ac2474 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2198,6 +2198,8 @@
#define NIC_SRAM_DATA_CFG_3 0x00000d3c
#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
+#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000
+#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000
#define NIC_SRAM_DATA_CFG_4 0x00000d60
#define NIC_SRAM_GMII_MODE 0x00000002
@@ -2222,6 +2224,12 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
+#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
+#define TG3_57766_FW_BASE_ADDR 0x00030000
+#define TG3_57766_FW_HANDSHAKE 0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP 0x51
+
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -2365,6 +2373,13 @@
#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
+/* Serdes PHY Register Definitions */
+#define SERDES_TG3_1000X_STATUS 0x14
+#define SERDES_TG3_SGMII_MODE 0x0001
+#define SERDES_TG3_LINK_UP 0x0002
+#define SERDES_TG3_FULL_DUPLEX 0x0004
+#define SERDES_TG3_SPEED_100 0x0008
+#define SERDES_TG3_SPEED_1000 0x0010
/* APE registers. Accessible through BAR1 */
#define TG3_APE_GPIO_MSG 0x0008
@@ -3009,17 +3024,18 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_TSO_BUG,
TG3_FLAG_MAX_RXPEND_64,
- TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
+ TG3_FLAG_FW_TSO,
TG3_FLAG_HW_TSO_1,
TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_TSO_BUG,
TG3_FLAG_ICH_WORKAROUND,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3080,13 @@ enum TG3_FLAGS {
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
};
+struct tg3_firmware_hdr {
+ __be32 version; /* unused for fragments */
+ __be32 base_addr;
+ __be32 len;
+};
+#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
+
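Given this header layout, a userspace sketch that walks a fragmented blob the same way tg3_load_firmware_cpu() does (hypothetical standalone code; ntohl() stands in for be32_to_cpu()):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_hdr {           /* mirrors struct tg3_firmware_hdr */
        uint32_t version;     /* big-endian; unused for fragments */
        uint32_t base_addr;   /* big-endian load address */
        uint32_t len;         /* big-endian; header + data for fragments */
    };

    /* Skip the main header (its len is 0xffffffff), then hop fragment to
     * fragment using each fragment's len field.
     */
    static void walk_fragments(const uint8_t *blob, size_t size)
    {
        const struct fw_hdr *hdr = (const struct fw_hdr *)blob + 1;
        long remaining = (long)size - (long)sizeof(*hdr);

        while (remaining > 0) {
            uint32_t len = ntohl(hdr->len);

            printf("fragment @0x%x, %u data bytes\n", ntohl(hdr->base_addr),
                   (unsigned int)(len - sizeof(*hdr)));
            remaining -= len;
            hdr = (const struct fw_hdr *)((const uint8_t *)hdr + len);
        }
    }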
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
@@ -3267,6 +3290,7 @@ struct tg3 {
#define TG3_PHYFLG_IS_LOW_POWER 0x00000001
#define TG3_PHYFLG_IS_CONNECTED 0x00000002
#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004
+#define TG3_PHYFLG_USER_CONFIGURED 0x00000008
#define TG3_PHYFLG_PHY_SERDES 0x00000010
#define TG3_PHYFLG_MII_SERDES 0x00000020
#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \
@@ -3284,6 +3308,8 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
+#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;