path: root/drivers/net/bnx2.c
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c | 1380
1 file changed, 837 insertions(+), 543 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 367b6d46270..2486a656f12 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -35,8 +35,8 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
@@ -47,6 +47,7 @@
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
+#include <linux/log2.h>
#include "bnx2.h"
#include "bnx2_fw.h"
@@ -56,8 +57,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.7.5"
-#define DRV_MODULE_RELDATE "April 29, 2008"
+#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_RELDATE "Aug 14, 2008"
#define RUN_AT(x) (jiffies + (x))
@@ -68,7 +69,7 @@ static char version[] __devinitdata =
"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -87,6 +88,7 @@ typedef enum {
BCM5708S,
BCM5709,
BCM5709S,
+ BCM5716,
} board_t;
/* indexed by board_t, above */
@@ -102,9 +104,10 @@ static struct {
{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+ { "Broadcom NetXtreme II BCM5716 1000Base-T" },
};
-static struct pci_device_id bnx2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
@@ -123,6 +126,8 @@ static struct pci_device_id bnx2_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+ { PCI_VENDOR_ID_BROADCOM, 0x163b,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
{ 0, }
};
@@ -226,7 +231,7 @@ static struct flash_spec flash_5709 = {
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
-static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
@@ -235,7 +240,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
/* The ring uses 256 indices for 255 entries, one of them
* needs to be skipped.
*/
- diff = bp->tx_prod - bnapi->tx_cons;
+ diff = txr->tx_prod - txr->tx_cons;
if (unlikely(diff >= TX_DESC_CNT)) {
diff &= 0xffff;
if (diff == TX_DESC_CNT)
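
For reference, a minimal standalone sketch of the availability arithmetic above (the constants mirror the driver's 256-descriptor pages and are assumptions here):

	#include <stdint.h>
	#include <stdio.h>

	#define TX_DESC_CNT	256			/* bds per page (assumed) */
	#define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)	/* last index is a chain bd */

	/* prod and cons are free-running 16-bit indices; after a wrap the
	 * difference must be masked back to 16 bits before it can be used.
	 */
	static uint32_t tx_avail(uint16_t prod, uint16_t cons, uint32_t ring_size)
	{
		uint32_t diff = prod - cons;	/* may wrap negative, then huge */

		if (diff >= TX_DESC_CNT) {
			diff &= 0xffff;
			if (diff == TX_DESC_CNT)
				diff = MAX_TX_DESC_CNT;
		}
		return ring_size - diff;
	}

	int main(void)
	{
		/* prod wrapped past cons by 3 descriptors: 3 still in flight */
		printf("%u free\n", tx_avail(2, 0xffff, 255));
		return 0;
	}
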
@@ -289,7 +294,6 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
REG_WR(bp, BNX2_CTX_CTX_CTRL,
offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
for (i = 0; i < 5; i++) {
- u32 val;
val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
break;
@@ -488,7 +492,7 @@ bnx2_netif_start(struct bnx2 *bp)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
- netif_wake_queue(bp->dev);
+ netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
}
@@ -496,99 +500,162 @@ bnx2_netif_start(struct bnx2 *bp)
}
static void
-bnx2_free_mem(struct bnx2 *bp)
+bnx2_free_tx_mem(struct bnx2 *bp)
{
int i;
- for (i = 0; i < bp->ctx_pages; i++) {
- if (bp->ctx_blk[i]) {
- pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
- bp->ctx_blk[i],
- bp->ctx_blk_mapping[i]);
- bp->ctx_blk[i] = NULL;
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+ if (txr->tx_desc_ring) {
+ pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
+ txr->tx_desc_ring,
+ txr->tx_desc_mapping);
+ txr->tx_desc_ring = NULL;
}
+ kfree(txr->tx_buf_ring);
+ txr->tx_buf_ring = NULL;
}
- if (bp->status_blk) {
- pci_free_consistent(bp->pdev, bp->status_stats_size,
- bp->status_blk, bp->status_blk_mapping);
- bp->status_blk = NULL;
- bp->stats_blk = NULL;
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
+
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ if (rxr->rx_desc_ring[j])
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+ rxr->rx_desc_ring[j],
+ rxr->rx_desc_mapping[j]);
+ rxr->rx_desc_ring[j] = NULL;
+ }
+ if (rxr->rx_buf_ring)
+ vfree(rxr->rx_buf_ring);
+ rxr->rx_buf_ring = NULL;
+
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ if (rxr->rx_pg_desc_ring[j])
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
+ }
+ if (rxr->rx_pg_ring)
+ vfree(rxr->rx_pg_ring);
+ rxr->rx_pg_ring = NULL;
}
- if (bp->tx_desc_ring) {
- pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
- bp->tx_desc_ring, bp->tx_desc_mapping);
- bp->tx_desc_ring = NULL;
- }
- kfree(bp->tx_buf_ring);
- bp->tx_buf_ring = NULL;
- for (i = 0; i < bp->rx_max_ring; i++) {
- if (bp->rx_desc_ring[i])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- bp->rx_desc_ring[i],
- bp->rx_desc_mapping[i]);
- bp->rx_desc_ring[i] = NULL;
- }
- vfree(bp->rx_buf_ring);
- bp->rx_buf_ring = NULL;
- for (i = 0; i < bp->rx_max_pg_ring; i++) {
- if (bp->rx_pg_desc_ring[i])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- bp->rx_pg_desc_ring[i],
- bp->rx_pg_desc_mapping[i]);
- bp->rx_pg_desc_ring[i] = NULL;
- }
- if (bp->rx_pg_ring)
- vfree(bp->rx_pg_ring);
- bp->rx_pg_ring = NULL;
}
static int
-bnx2_alloc_mem(struct bnx2 *bp)
+bnx2_alloc_tx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+ txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+ if (txr->tx_buf_ring == NULL)
+ return -ENOMEM;
+
+ txr->tx_desc_ring =
+ pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
+ &txr->tx_desc_mapping);
+ if (txr->tx_desc_ring == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
{
- int i, status_blk_size;
+ int i;
- bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
- if (bp->tx_buf_ring == NULL)
- return -ENOMEM;
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
- bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
- &bp->tx_desc_mapping);
- if (bp->tx_desc_ring == NULL)
- goto alloc_mem_err;
+ rxr->rx_buf_ring =
+ vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ if (rxr->rx_buf_ring == NULL)
+ return -ENOMEM;
- bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
- if (bp->rx_buf_ring == NULL)
- goto alloc_mem_err;
+ memset(rxr->rx_buf_ring, 0,
+ SW_RXBD_RING_SIZE * bp->rx_max_ring);
- memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ rxr->rx_desc_ring[j] =
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+ &rxr->rx_desc_mapping[j]);
+ if (rxr->rx_desc_ring[j] == NULL)
+ return -ENOMEM;
- for (i = 0; i < bp->rx_max_ring; i++) {
- bp->rx_desc_ring[i] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &bp->rx_desc_mapping[i]);
- if (bp->rx_desc_ring[i] == NULL)
- goto alloc_mem_err;
+ }
- }
+ if (bp->rx_pg_ring_size) {
+ rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ if (rxr->rx_pg_ring == NULL)
+ return -ENOMEM;
- if (bp->rx_pg_ring_size) {
- bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
- if (bp->rx_pg_ring == NULL)
- goto alloc_mem_err;
+ memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ }
- memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ rxr->rx_pg_desc_ring[j] =
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+ &rxr->rx_pg_desc_mapping[j]);
+ if (rxr->rx_pg_desc_ring[j] == NULL)
+ return -ENOMEM;
+
+ }
}
+ return 0;
+}
+
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
- for (i = 0; i < bp->rx_max_pg_ring; i++) {
- bp->rx_pg_desc_ring[i] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &bp->rx_pg_desc_mapping[i]);
- if (bp->rx_pg_desc_ring[i] == NULL)
- goto alloc_mem_err;
+ bnx2_free_tx_mem(bp);
+ bnx2_free_rx_mem(bp);
+ for (i = 0; i < bp->ctx_pages; i++) {
+ if (bp->ctx_blk[i]) {
+ pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+ bp->ctx_blk[i],
+ bp->ctx_blk_mapping[i]);
+ bp->ctx_blk[i] = NULL;
+ }
+ }
+ if (bnapi->status_blk.msi) {
+ pci_free_consistent(bp->pdev, bp->status_stats_size,
+ bnapi->status_blk.msi,
+ bp->status_blk_mapping);
+ bnapi->status_blk.msi = NULL;
+ bp->stats_blk = NULL;
}
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+ int i, status_blk_size, err;
+ struct bnx2_napi *bnapi;
+ void *status_blk;
/* Combine status and statistics blocks into one allocation. */
status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
@@ -598,27 +665,37 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
- &bp->status_blk_mapping);
- if (bp->status_blk == NULL)
+ status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
+ &bp->status_blk_mapping);
+ if (status_blk == NULL)
goto alloc_mem_err;
- memset(bp->status_blk, 0, bp->status_stats_size);
+ memset(status_blk, 0, bp->status_stats_size);
- bp->bnx2_napi[0].status_blk = bp->status_blk;
+ bnapi = &bp->bnx2_napi[0];
+ bnapi->status_blk.msi = status_blk;
+ bnapi->hw_tx_cons_ptr =
+ &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+ bnapi->hw_rx_cons_ptr =
+ &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
if (bp->flags & BNX2_FLAG_MSIX_CAP) {
for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
- struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct status_block_msix *sblk;
+
+ bnapi = &bp->bnx2_napi[i];
- bnapi->status_blk_msix = (void *)
- ((unsigned long) bp->status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (void *) (status_blk +
+ BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ bnapi->status_blk.msix = sblk;
+ bnapi->hw_tx_cons_ptr =
+ &sblk->status_tx_quick_consumer_index;
+ bnapi->hw_rx_cons_ptr =
+ &sblk->status_rx_quick_consumer_index;
bnapi->int_num = i << 24;
}
}
- bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
- status_blk_size);
+ bp->stats_blk = status_blk + status_blk_size;
bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
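
A hedged sketch of the single-buffer layout this sets up; the per-vector stride is an assumption standing in for BNX2_SBLK_MSIX_ALIGN_SIZE:

	#include <stdint.h>
	#include <stddef.h>

	#define SBLK_MSIX_ALIGN_SIZE	128	/* assumed per-vector stride */

	/* One DMA allocation holds the base status block, one aligned MSI-X
	 * status block per extra vector, and the statistics block at the end:
	 *
	 *   base + 0 .................. status block, vector 0 (INTx/MSI)
	 *   base + 128 * i ............ MSI-X status block, vector i
	 *   base + status_blk_size .... statistics block
	 */
	static void *msix_sblk(void *base, int vec)
	{
		return (uint8_t *)base + SBLK_MSIX_ALIGN_SIZE * vec;
	}

	static void *stats_blk(void *base, size_t status_blk_size)
	{
		return (uint8_t *)base + status_blk_size;
	}
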
@@ -634,6 +711,15 @@ bnx2_alloc_mem(struct bnx2 *bp)
goto alloc_mem_err;
}
}
+
+ err = bnx2_alloc_rx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
+ err = bnx2_alloc_tx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
return 0;
alloc_mem_err:
@@ -993,9 +1079,9 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
static void
-bnx2_init_rx_context0(struct bnx2 *bp)
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
- u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+ u32 val, rx_cid_addr = GET_CID_ADDR(cid);
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
@@ -1028,6 +1114,19 @@ bnx2_init_rx_context0(struct bnx2 *bp)
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+ int i;
+ u32 cid;
+
+ for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+ if (i == 1)
+ cid = RX_RSS_CID;
+ bnx2_init_rx_context(bp, cid);
+ }
+}
+
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
@@ -1093,7 +1192,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bnx2_init_rx_context0(bp);
+ bnx2_init_all_rx_contexts(bp);
return 0;
}
@@ -1392,7 +1491,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
return adv;
}
-static int bnx2_fw_sync(struct bnx2 *, u32, int);
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
@@ -1445,7 +1544,7 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
spin_unlock_bh(&bp->phy_lock);
- bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
spin_lock_bh(&bp->phy_lock);
return 0;
@@ -1875,7 +1974,7 @@ bnx2_setup_phy(struct bnx2 *bp, u8 port)
}
static int
-bnx2_init_5709s_phy(struct bnx2 *bp)
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
@@ -1890,7 +1989,8 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
@@ -1924,11 +2024,12 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_5708s_phy(struct bnx2 *bp)
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bp->mii_up1 = BCM5708S_UP1;
@@ -1981,9 +2082,10 @@ bnx2_init_5708s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_5706s_phy(struct bnx2 *bp)
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
@@ -2018,11 +2120,12 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_copper_phy(struct bnx2 *bp)
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
bnx2_write_phy(bp, 0x18, 0x0c00);
@@ -2070,7 +2173,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
static int
-bnx2_init_phy(struct bnx2 *bp)
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
int rc = 0;
@@ -2096,14 +2199,14 @@ bnx2_init_phy(struct bnx2 *bp)
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
if (CHIP_NUM(bp) == CHIP_NUM_5706)
- rc = bnx2_init_5706s_phy(bp);
+ rc = bnx2_init_5706s_phy(bp, reset_phy);
else if (CHIP_NUM(bp) == CHIP_NUM_5708)
- rc = bnx2_init_5708s_phy(bp);
+ rc = bnx2_init_5708s_phy(bp, reset_phy);
else if (CHIP_NUM(bp) == CHIP_NUM_5709)
- rc = bnx2_init_5709s_phy(bp);
+ rc = bnx2_init_5709s_phy(bp, reset_phy);
}
else {
- rc = bnx2_init_copper_phy(bp);
+ rc = bnx2_init_copper_phy(bp, reset_phy);
}
setup_phy:
@@ -2159,7 +2262,7 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
}
static int
-bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
int i;
u32 val;
@@ -2169,6 +2272,9 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+ if (!ack)
+ return 0;
+
/* wait for an acknowledgement. */
for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
msleep(10);
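
The new third argument decouples posting a message from waiting for the firmware acknowledgement; a hedged usage sketch (the message code is the driver's own, the comments paraphrase the behavior):

	/* bnx2_fw_sync(bp, msg_data, ack, silent):
	 *
	 *   bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	 *		post, then poll the firmware mailbox for the ACK
	 *
	 *   bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0, 0);
	 *		fire-and-forget: write the mailbox and return at once
	 */
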
@@ -2345,28 +2451,27 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp)
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
u32 val;
- u8 *mac_addr = bp->dev->dev_addr;
val = (mac_addr[0] << 8) | mac_addr[1];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
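
A self-contained sketch of the packing and slot addressing above; the base register value is an assumption for illustration:

	#include <stdint.h>

	#define EMAC_MAC_MATCH0	0x1410	/* assumed base; slot stride is 8 bytes */

	/* Each filter slot is a register pair: MATCH0 holds bytes 0-1 of the
	 * MAC address, MATCH1 holds bytes 2-5, and slot 'pos' sits at pos * 8.
	 */
	static void mac_match_slot(const uint8_t *mac, uint32_t pos,
				   uint32_t *reg_hi, uint32_t *val_hi,
				   uint32_t *reg_lo, uint32_t *val_lo)
	{
		*reg_hi = EMAC_MAC_MATCH0 + pos * 8;
		*val_hi = (mac[0] << 8) | mac[1];
		*reg_lo = *reg_hi + 4;
		*val_lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
			  (mac[4] << 8) | mac[5];
	}
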
static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
dma_addr_t mapping;
- struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct rx_bd *rxbd =
- &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+ &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
struct page *page = alloc_page(GFP_ATOMIC);
if (!page)
@@ -2381,9 +2486,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
}
static void
-bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
- struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct page *page = rx_pg->page;
if (!page)
@@ -2397,12 +2502,12 @@ bnx2_free_rx_page(struct bnx2 *bp, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
struct sk_buff *skb;
- struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
- struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -2422,7 +2527,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
- bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
return 0;
}
@@ -2430,7 +2535,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
u32 new_link_state, old_link_state;
int is_set = 1;
@@ -2466,11 +2571,9 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
u16 cons;
- if (bnapi->int_num == 0)
- cons = bnapi->status_blk->status_tx_quick_consumer_index0;
- else
- cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
-
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_tx_cons_ptr;
if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
cons++;
return cons;
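
A minimal model of why that barrier is needed, assuming GCC-style inline asm: without it the compiler may hoist the load of the DMA-updated field out of the NAPI poll loop, since no C code appears to write it.

	#include <stdint.h>

	#define barrier()	__asm__ __volatile__("" ::: "memory")

	static uint16_t get_hw_cons(const uint16_t *hw_cons_ptr, uint16_t max_cnt)
	{
		uint16_t cons;

		barrier();		/* force a fresh load; the chip DMAs here */
		cons = *hw_cons_ptr;
		if ((cons & max_cnt) == max_cnt)
			cons++;		/* skip the chain-bd index at page end */
		return cons;
	}
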
@@ -2479,11 +2582,16 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
- int tx_pkt = 0;
+ int tx_pkt = 0, index;
+ struct netdev_queue *txq;
+
+ index = (bnapi - bp->bnx2_napi);
+ txq = netdev_get_tx_queue(bp->dev, index);
hw_cons = bnx2_get_hw_tx_cons(bnapi);
- sw_cons = bnapi->tx_cons;
+ sw_cons = txr->tx_cons;
while (sw_cons != hw_cons) {
struct sw_bd *tx_buf;
@@ -2492,7 +2600,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_ring_cons = TX_RING_IDX(sw_cons);
- tx_buf = &bp->tx_buf_ring[sw_ring_cons];
+ tx_buf = &txr->tx_buf_ring[sw_ring_cons];
skb = tx_buf->skb;
/* partial BD completions possible with TSO packets */
@@ -2522,7 +2630,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
pci_unmap_page(bp->pdev,
pci_unmap_addr(
- &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
+ &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
@@ -2538,44 +2646,46 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
- bnapi->hw_tx_cons = hw_cons;
- bnapi->tx_cons = sw_cons;
+ txr->hw_tx_cons = hw_cons;
+ txr->tx_cons = sw_cons;
+
/* Need to make the tx_cons update visible to bnx2_start_xmit()
- * before checking for netif_queue_stopped(). Without the
+ * before checking for netif_tx_queue_stopped(). Without the
* memory barrier, there is a small possibility that bnx2_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
- if (unlikely(netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
- netif_tx_lock(bp->dev);
- if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
- netif_wake_queue(bp->dev);
- netif_tx_unlock(bp->dev);
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
+
return tx_pkt;
}
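
The barrier pairing that comment describes, sketched side by side (a model of the logic, not a verbatim excerpt of bnx2_start_xmit()):

	/*   consumer (bnx2_tx_int)          producer (bnx2_start_xmit)
	 *   -----------------------         --------------------------
	 *   txr->tx_cons = sw_cons;         netif_tx_stop_queue(txq);
	 *   smp_mb();                       smp_mb();
	 *   if (queue stopped &&            if (tx_avail > wake_thresh)
	 *       tx_avail > wake_thresh)             netif_tx_wake_queue(txq);
	 *           wake queue under lock;
	 *
	 * Each side publishes its write before reading the other side's state,
	 * so at least one of them sees the final values and the queue cannot
	 * remain stopped with descriptors free.
	 */
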
static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
struct sk_buff *skb, int count)
{
struct sw_pg *cons_rx_pg, *prod_rx_pg;
struct rx_bd *cons_bd, *prod_bd;
dma_addr_t mapping;
int i;
- u16 hw_prod = bnapi->rx_pg_prod, prod;
- u16 cons = bnapi->rx_pg_cons;
+ u16 hw_prod = rxr->rx_pg_prod, prod;
+ u16 cons = rxr->rx_pg_cons;
for (i = 0; i < count; i++) {
prod = RX_PG_RING_IDX(hw_prod);
- prod_rx_pg = &bp->rx_pg_ring[prod];
- cons_rx_pg = &bp->rx_pg_ring[cons];
- cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_rx_pg = &rxr->rx_pg_ring[prod];
+ cons_rx_pg = &rxr->rx_pg_ring[cons];
+ cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
if (i == 0 && skb) {
struct page *page;
@@ -2604,25 +2714,25 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
hw_prod = NEXT_RX_BD(hw_prod);
}
- bnapi->rx_pg_prod = hw_prod;
- bnapi->rx_pg_cons = cons;
+ rxr->rx_pg_prod = hw_prod;
+ rxr->rx_pg_cons = cons;
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
- u16 cons, u16 prod)
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ struct sk_buff *skb, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
- cons_rx_buf = &bp->rx_buf_ring[cons];
- prod_rx_buf = &bp->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
pci_unmap_addr(cons_rx_buf, mapping),
- bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
- bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
@@ -2632,33 +2742,33 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
pci_unmap_addr_set(prod_rx_buf, mapping,
pci_unmap_addr(cons_rx_buf, mapping));
- cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
- err = bnx2_alloc_rx_skb(bp, bnapi, prod);
+ err = bnx2_alloc_rx_skb(bp, rxr, prod);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
- bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
return err;
}
- skb_reserve(skb, bp->rx_offset);
+ skb_reserve(skb, BNX2_RX_OFFSET);
pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -2668,8 +2778,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
- u16 pg_cons = bnapi->rx_pg_cons;
- u16 pg_prod = bnapi->rx_pg_prod;
+ u16 pg_cons = rxr->rx_pg_cons;
+ u16 pg_prod = rxr->rx_pg_prod;
frag_size = len + 4 - hdr_len;
pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
@@ -2680,9 +2790,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
if (unlikely(frag_len <= 4)) {
unsigned int tail = 4 - frag_len;
- bnapi->rx_pg_cons = pg_cons;
- bnapi->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
pages - i);
skb->len -= tail;
if (i == 0) {
@@ -2696,7 +2806,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
}
return 0;
}
- rx_pg = &bp->rx_pg_ring[pg_cons];
+ rx_pg = &rxr->rx_pg_ring[pg_cons];
pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
PAGE_SIZE, PCI_DMA_FROMDEVICE);
@@ -2707,11 +2817,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
rx_pg->page = NULL;
- err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+ err = bnx2_alloc_rx_page(bp, rxr,
+ RX_PG_RING_IDX(pg_prod));
if (unlikely(err)) {
- bnapi->rx_pg_cons = pg_cons;
- bnapi->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
return err;
}
@@ -2724,8 +2835,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
pg_prod = NEXT_RX_BD(pg_prod);
pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
}
- bnapi->rx_pg_prod = pg_prod;
- bnapi->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ rxr->rx_pg_cons = pg_cons;
}
return 0;
}
@@ -2733,8 +2844,11 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
- u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
+ u16 cons;
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_rx_cons_ptr;
if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
cons++;
return cons;
@@ -2743,13 +2857,14 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
- sw_cons = bnapi->rx_cons;
- sw_prod = bnapi->rx_prod;
+ sw_cons = rxr->rx_cons;
+ sw_prod = rxr->rx_prod;
/* Memory barrier necessary as speculative reads of the rx
* buffer can be ahead of the index in the status block
@@ -2761,11 +2876,13 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u16 vtag = 0;
+ int hw_vlan __maybe_unused = 0;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
- rx_buf = &bp->rx_buf_ring[sw_ring_cons];
+ rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
rx_buf->skb = NULL;
@@ -2773,7 +2890,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_addr = pci_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
- bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+ PCI_DMA_FROMDEVICE);
rx_hdr = (struct l2_fhdr *) skb->data;
len = rx_hdr->l2_fhdr_pkt_len;
@@ -2785,7 +2903,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME)) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
@@ -2803,27 +2921,47 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
if (len <= bp->rx_copy_thresh) {
struct sk_buff *new_skb;
- new_skb = netdev_alloc_skb(bp->dev, len + 2);
+ new_skb = netdev_alloc_skb(bp->dev, len + 6);
if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
- new_skb->data, len + 2);
- skb_reserve(new_skb, 2);
+ skb_copy_from_linear_data_offset(skb,
+ BNX2_RX_OFFSET - 6,
+ new_skb->data, len + 6);
+ skb_reserve(new_skb, 6);
skb_put(new_skb, len);
- bnx2_reuse_rx_skb(bp, bnapi, skb,
+ bnx2_reuse_rx_skb(bp, rxr, skb,
sw_ring_cons, sw_ring_prod);
skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+ } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
goto next_rx;
+ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+ !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = rx_hdr->l2_fhdr_vlan_tag;
+#ifdef BCM_VLAN
+ if (bp->vlgrp)
+ hw_vlan = 1;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, 4);
+
+ memmove(ve, skb->data + 4, ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ len += 4;
+ }
+ }
+
skb->protocol = eth_type_trans(skb, bp->dev);
if ((len > (bp->dev->mtu + ETH_HLEN)) &&
@@ -2845,10 +2983,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
#ifdef BCM_VLAN
- if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- rx_hdr->l2_fhdr_vlan_tag);
- }
+ if (hw_vlan)
+ vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
else
#endif
netif_receive_skb(skb);
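
A standalone model of the fallback path added above: when no vlgrp is registered, the hardware-stripped tag is put back into the frame by pushing four bytes and sliding the MAC addresses down (buffer layout assumed):

	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN	6
	#define ETH_P_8021Q	0x8100

	/* 'pkt' plays the role of __skb_push(skb, 4): it points 4 bytes below
	 * the old frame start, so pkt + 4 is the original Ethernet header.
	 */
	static size_t vlan_reinsert(uint8_t *pkt, size_t len, uint16_t vtag)
	{
		memmove(pkt, pkt + 4, ETH_ALEN * 2);	/* dst + src MAC */
		pkt[12] = ETH_P_8021Q >> 8;		/* h_vlan_proto (be16) */
		pkt[13] = ETH_P_8021Q & 0xff;
		pkt[14] = vtag >> 8;			/* h_vlan_TCI (be16) */
		pkt[15] = vtag & 0xff;
		return len + 4;
	}
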
@@ -2869,16 +3005,15 @@ next_rx:
rmb();
}
}
- bnapi->rx_cons = sw_cons;
- bnapi->rx_prod = sw_prod;
+ rxr->rx_cons = sw_cons;
+ rxr->rx_prod = sw_prod;
if (pg_ring_used)
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
- bnapi->rx_pg_prod);
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
+ REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
mmiowb();
@@ -2892,11 +3027,11 @@ next_rx:
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
- prefetch(bnapi->status_blk);
+ prefetch(bnapi->status_blk.msi);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -2913,11 +3048,11 @@ bnx2_msi(int irq, void *dev_instance)
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
- prefetch(bnapi->status_blk);
+ prefetch(bnapi->status_blk.msi);
/* Return here if interrupt is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -2931,10 +3066,10 @@ bnx2_msi_1shot(int irq, void *dev_instance)
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
- struct status_block *sblk = bnapi->status_blk;
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
+ struct status_block *sblk = bnapi->status_blk.msi;
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block posted prior to the
@@ -2968,21 +3103,16 @@ bnx2_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static irqreturn_t
-bnx2_tx_msix(int irq, void *dev_instance)
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
-
- prefetch(bnapi->status_blk_msix);
-
- /* Return here if interrupt is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
- netif_rx_schedule(dev, &bnapi->napi);
- return IRQ_HANDLED;
+ if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+ (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+ return 1;
+ return 0;
}
#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
@@ -2991,10 +3121,9 @@ bnx2_tx_msix(int irq, void *dev_instance)
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
- if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
- (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
+ if (bnx2_has_fast_work(bnapi))
return 1;
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -3004,33 +3133,9 @@ bnx2_has_work(struct bnx2_napi *bnapi)
return 0;
}
-static int bnx2_tx_poll(struct napi_struct *napi, int budget)
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
- struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
- struct bnx2 *bp = bnapi->bp;
- int work_done = 0;
- struct status_block_msix *sblk = bnapi->status_blk_msix;
-
- do {
- work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
- if (unlikely(work_done >= budget))
- return work_done;
-
- bnapi->last_status_idx = sblk->status_idx;
- rmb();
- } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
-
- netif_rx_complete(bp->dev, napi);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
- return work_done;
-}
-
-static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
- int work_done, int budget)
-{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
u32 status_attn_bits = sblk->status_attn_bits;
u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
@@ -3046,24 +3151,60 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
REG_RD(bp, BNX2_HC_COMMAND);
}
+}
- if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+ int work_done, int budget)
+{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
bnx2_tx_int(bp, bnapi, 0);
- if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
+ if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
return work_done;
}
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
+ int work_done = 0;
+ struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+ while (1) {
+ work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+ if (unlikely(work_done >= budget))
+ break;
+
+ bnapi->last_status_idx = sblk->status_idx;
+ /* status idx must be read before checking for more work. */
+ rmb();
+ if (likely(!bnx2_has_fast_work(bnapi))) {
+
+ netif_rx_complete(bp->dev, napi);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ break;
+ }
+ }
+ return work_done;
+}
+
static int bnx2_poll(struct napi_struct *napi, int budget)
{
struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
struct bnx2 *bp = bnapi->bp;
int work_done = 0;
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
while (1) {
+ bnx2_poll_link(bp, bnapi);
+
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
if (unlikely(work_done >= budget))
@@ -3106,6 +3247,7 @@ bnx2_set_rx_mode(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
u32 rx_mode, sort_mode;
+ struct dev_addr_list *uc_ptr;
int i;
spin_lock_bh(&bp->phy_lock);
@@ -3114,10 +3256,10 @@ bnx2_set_rx_mode(struct net_device *dev)
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
- if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
- if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
if (dev->flags & IFF_PROMISC) {
@@ -3161,6 +3303,25 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
+ uc_ptr = NULL;
+ if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+ rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+ sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+ BNX2_RPM_SORT_USER0_PROM_VLAN;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ uc_ptr = dev->uc_list;
+
+ /* Add all entries into the match filter list */
+ for (i = 0; i < dev->uc_count; i++) {
+ bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+ i + BNX2_START_UNICAST_ADDRESS_INDEX);
+ sort_mode |= (1 <<
+ (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+ uc_ptr = uc_ptr->next;
+ }
+
+ }
+
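
A hedged sketch of the bookkeeping in that loop; the slot constants are assumptions standing in for the driver's values:

	#include <stdint.h>

	#define MAX_UNICAST_ADDRESSES		4	/* assumed slot count */
	#define START_UNICAST_ADDRESS_INDEX	4	/* assumed first slot */

	/* Each secondary unicast address claims one MAC-match slot and flips
	 * the matching enable bit in the RPM sort-user register.
	 */
	static uint32_t uc_sort_bits(int uc_count)
	{
		uint32_t sort_mode = 0;
		int i;

		for (i = 0; i < uc_count && i < MAX_UNICAST_ADDRESSES; i++)
			sort_mode |= 1u << (i + START_UNICAST_ADDRESS_INDEX);
		return sort_mode;
	}
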
if (rx_mode != bp->rx_mode) {
bp->rx_mode = rx_mode;
REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
@@ -3213,7 +3374,7 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
}
static int
-load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
u32 offset;
u32 val;
@@ -3297,7 +3458,6 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
static int
bnx2_init_cpus(struct bnx2 *bp)
{
- struct cpu_reg cpu_reg;
struct fw_info *fw;
int rc, rv2p_len;
void *text, *rv2p;
@@ -3333,122 +3493,57 @@ bnx2_init_cpus(struct bnx2 *bp)
load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
/* Initialize the RX Processor. */
- cpu_reg.mode = BNX2_RXP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_RXP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_RXP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_rxp_fw_09;
else
fw = &bnx2_rxp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
if (rc)
goto init_cpu_err;
/* Initialize the TX Processor. */
- cpu_reg.mode = BNX2_TXP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_TXP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_TXP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_txp_fw_09;
else
fw = &bnx2_txp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
if (rc)
goto init_cpu_err;
/* Initialize the TX Patch-up Processor. */
- cpu_reg.mode = BNX2_TPAT_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_TPAT_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_tpat_fw_09;
else
fw = &bnx2_tpat_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
if (rc)
goto init_cpu_err;
/* Initialize the Completion Processor. */
- cpu_reg.mode = BNX2_COM_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_COM_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_COM_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_com_fw_09;
else
fw = &bnx2_com_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_com, fw);
if (rc)
goto init_cpu_err;
/* Initialize the Command Processor. */
- cpu_reg.mode = BNX2_CP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_CP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_CP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_cp_fw_09;
else
fw = &bnx2_cp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
init_cpu_err:
vfree(text);
@@ -3511,7 +3606,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
bp->autoneg = autoneg;
bp->advertising = advertising;
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = REG_RD(bp, BNX2_EMAC_MODE);
@@ -3562,7 +3657,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
}
if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
+ 1, 0);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
@@ -4203,35 +4299,43 @@ nvram_write_end:
}
static void
-bnx2_init_remote_phy(struct bnx2 *bp)
+bnx2_init_fw_cap(struct bnx2 *bp)
{
- u32 val;
+ u32 val, sig = 0;
bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
- if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
- return;
+ bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+ if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
return;
- if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
+ if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+ }
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+ u32 link;
+
bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
- val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
- if (val & BNX2_LINK_STATUS_SERDES_LINK)
+ link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+ if (link & BNX2_LINK_STATUS_SERDES_LINK)
bp->phy_port = PORT_FIBRE;
else
bp->phy_port = PORT_TP;
- if (netif_running(bp->dev)) {
- u32 sig;
-
- sig = BNX2_DRV_ACK_CAP_SIGNATURE |
- BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
- bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
- }
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+ BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
}
+
+ if (netif_running(bp->dev) && sig)
+ bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
static void
@@ -4261,7 +4365,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
udelay(5);
/* Wait for the firmware to tell us it is ok to issue a reset. */
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
/* Deposit a driver reset signature so the firmware knows that
* this is a soft reset. */
@@ -4322,13 +4426,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
}
/* Wait for the firmware to finish its initialization. */
- rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
+ rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
if (rc)
return rc;
spin_lock_bh(&bp->phy_lock);
old_port = bp->phy_port;
- bnx2_init_remote_phy(bp);
+ bnx2_init_fw_cap(bp);
if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
old_port != bp->phy_port)
bnx2_set_default_remote_link(bp);
@@ -4412,7 +4516,7 @@ bnx2_init_chip(struct bnx2 *bp)
bnx2_init_nvram(bp);
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = REG_RD(bp, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
@@ -4498,15 +4602,25 @@ bnx2_init_chip(struct bnx2 *bp)
BNX2_HC_CONFIG_COLLECT_STATS;
}
- if (bp->flags & BNX2_FLAG_USING_MSIX) {
- u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
- BNX2_HC_SB_CONFIG_1;
-
+ if (bp->irq_nvecs > 1) {
REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
BNX2_HC_MSIX_BIT_VECTOR_VAL);
+ val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+ }
+
+ if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+ val |= BNX2_HC_CONFIG_ONE_SHOT;
+
+ REG_WR(bp, BNX2_HC_CONFIG, val);
+
+ for (i = 1; i < bp->irq_nvecs; i++) {
+ u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+ BNX2_HC_SB_CONFIG_1;
+
REG_WR(bp, base,
BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+ BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
BNX2_HC_SB_CONFIG_1_ONE_SHOT);
REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
@@ -4516,13 +4630,13 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
(bp->tx_ticks_int << 16) | bp->tx_ticks);
- val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
- }
-
- if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
- val |= BNX2_HC_CONFIG_ONE_SHOT;
+ REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+ (bp->rx_quick_cons_trip_int << 16) |
+ bp->rx_quick_cons_trip);
- REG_WR(bp, BNX2_HC_CONFIG, val);
+ REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+ (bp->rx_ticks_int << 16) | bp->rx_ticks);
+ }
/* Clear internal stats counters. */
REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
@@ -4538,7 +4652,7 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
}
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
- 0);
+ 1, 0);
REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
@@ -4554,23 +4668,27 @@ static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
int i;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
- bnapi->tx_cons = 0;
- bnapi->hw_tx_cons = 0;
- bnapi->rx_prod_bseq = 0;
- bnapi->rx_prod = 0;
- bnapi->rx_cons = 0;
- bnapi->rx_pg_prod = 0;
- bnapi->rx_pg_cons = 0;
+ txr->tx_cons = 0;
+ txr->hw_tx_cons = 0;
+ rxr->rx_prod_bseq = 0;
+ rxr->rx_prod = 0;
+ rxr->rx_cons = 0;
+ rxr->rx_pg_prod = 0;
+ rxr->rx_pg_cons = 0;
}
}
static void
-bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
u32 val, offset0, offset1, offset2, offset3;
u32 cid_addr = GET_CID_ADDR(cid);
@@ -4592,43 +4710,43 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
bnx2_ctx_wr(bp, cid_addr, offset1, val);
- val = (u64) bp->tx_desc_mapping >> 32;
+ val = (u64) txr->tx_desc_mapping >> 32;
bnx2_ctx_wr(bp, cid_addr, offset2, val);
- val = (u64) bp->tx_desc_mapping & 0xffffffff;
+ val = (u64) txr->tx_desc_mapping & 0xffffffff;
bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
static void
-bnx2_init_tx_ring(struct bnx2 *bp)
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
struct tx_bd *txbd;
u32 cid = TX_CID;
struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
- bp->tx_vec = 0;
- if (bp->flags & BNX2_FLAG_USING_MSIX) {
- cid = TX_TSS_CID;
- bp->tx_vec = BNX2_TX_VEC;
- REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
- (TX_TSS_CID << 7));
- }
- bnapi = &bp->bnx2_napi[bp->tx_vec];
+ bnapi = &bp->bnx2_napi[ring_num];
+ txr = &bnapi->tx_ring;
+
+ if (ring_num == 0)
+ cid = TX_CID;
+ else
+ cid = TX_TSS_CID + ring_num - 1;
bp->tx_wake_thresh = bp->tx_ring_size / 2;
- txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
+ txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
- txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
- txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
+ txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+ txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
- bp->tx_prod = 0;
- bp->tx_prod_bseq = 0;
+ txr->tx_prod = 0;
+ txr->tx_prod_bseq = 0;
- bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
- bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+ txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+ txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
- bnx2_init_tx_context(bp, cid);
+ bnx2_init_tx_context(bp, cid, txr);
}
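
The ring-to-CID mapping this introduces, as a tiny sketch (the CID values here are assumptions for illustration):

	#define TX_CID		16	/* assumed legacy TX context id */
	#define TX_TSS_CID	32	/* assumed first TSS context id */

	/* Ring 0 keeps the legacy CID; extra TSS rings take consecutive CIDs. */
	static unsigned int tx_ring_cid(int ring_num)
	{
		return ring_num == 0 ? TX_CID : TX_TSS_CID + ring_num - 1;
	}
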
static void
@@ -4656,17 +4774,25 @@ bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
}
static void
-bnx2_init_rx_ring(struct bnx2 *bp)
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
int i;
u16 prod, ring_prod;
- u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ u32 cid, rx_cid_addr, val;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if (ring_num == 0)
+ cid = RX_CID;
+ else
+ cid = RX_RSS_CID + ring_num - 1;
+
+ rx_cid_addr = GET_CID_ADDR(cid);
- bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
+ bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
bp->rx_buf_use_size, bp->rx_max_ring);
- bnx2_init_rx_context0(bp);
+ bnx2_init_rx_context(bp, cid);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
@@ -4675,54 +4801,101 @@ bnx2_init_rx_ring(struct bnx2 *bp)
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
if (bp->rx_pg_ring_size) {
- bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
- bp->rx_pg_desc_mapping,
+ bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+ rxr->rx_pg_desc_mapping,
PAGE_SIZE, bp->rx_max_pg_ring);
val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
- BNX2_L2CTX_RBDC_JUMBO_KEY);
+ BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
- val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
+ val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
- val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
+ val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
}
- val = (u64) bp->rx_desc_mapping[0] >> 32;
+ val = (u64) rxr->rx_desc_mapping[0] >> 32;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
- val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
+ val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
- ring_prod = prod = bnapi->rx_pg_prod;
+ ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
- if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
+ if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
break;
prod = NEXT_RX_BD(prod);
ring_prod = RX_PG_RING_IDX(prod);
}
- bnapi->rx_pg_prod = prod;
+ rxr->rx_pg_prod = prod;
- ring_prod = prod = bnapi->rx_prod;
+ ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
+ if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
break;
- }
prod = NEXT_RX_BD(prod);
ring_prod = RX_RING_IDX(prod);
}
- bnapi->rx_prod = prod;
+ rxr->rx_prod = prod;
+
+ rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+ rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+ rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
- bnapi->rx_pg_prod);
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+ REG_WR16(bp, rxr->rx_bidx_addr, prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+}
+
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+ int i;
+ u32 val;
+
+ bnx2_clear_ring_states(bp);
+
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+ for (i = 0; i < bp->num_tx_rings; i++)
+ bnx2_init_tx_ring(bp, i);
+
+ if (bp->num_tx_rings > 1)
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+ (TX_TSS_CID << 7));
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+ bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
+ for (i = 0; i < bp->num_rx_rings; i++)
+ bnx2_init_rx_ring(bp, i);
+
+ if (bp->num_rx_rings > 1) {
+ u32 tbl_32;
+ u8 *tbl = (u8 *) &tbl_32;
+
+ bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
+ BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
+
+ for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+ tbl[i % 4] = i % (bp->num_rx_rings - 1);
+ if ((i % 4) == 3)
+ bnx2_reg_wr_ind(bp,
+ BNX2_RXP_SCRATCH_RSS_TBL + i,
+ cpu_to_be32(tbl_32));
+ }
+
+ val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+ BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+
+ }
}
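
A runnable model of the indirection-table fill above (table size assumed): each entry is one byte selecting an RSS ring, and the driver packs four entries per 32-bit big-endian scratchpad write.

	#include <stdint.h>
	#include <stdio.h>

	#define RSS_TBL_MAX_ENTRIES	128	/* assumed table size */

	static void fill_rss_tbl(uint8_t *tbl, int entries, int num_rx_rings)
	{
		int i;

		/* ring 0 handles non-RSS traffic; hash buckets spread over
		 * the num_rx_rings - 1 RSS rings, so values run 0..n-2.
		 */
		for (i = 0; i < entries; i++)
			tbl[i] = i % (num_rx_rings - 1);
	}

	int main(void)
	{
		uint8_t tbl[RSS_TBL_MAX_ENTRIES];

		fill_rss_tbl(tbl, RSS_TBL_MAX_ENTRIES, 5);	/* 4 RSS rings */
		printf("bucket 6 -> ring value %u\n", tbl[6]);	/* 6 mod 4 = 2 */
		return 0;
	}
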
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
@@ -4750,12 +4923,12 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
u32 rx_size, rx_space, jumbo_size;
/* 8 for CRC and VLAN */
- rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
+ rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
sizeof(struct skb_shared_info);
- bp->rx_copy_thresh = RX_COPY_THRESH;
+ bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
bp->rx_max_pg_ring = 0;
bp->rx_max_pg_ring_idx = 0;
@@ -4770,14 +4943,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
MAX_RX_PG_RINGS);
bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
- rx_size = RX_COPY_THRESH + bp->rx_offset;
+ rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
bp->rx_copy_thresh = 0;
}
bp->rx_buf_use_size = rx_size;
/* hw alignment */
bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
- bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
+ bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
@@ -4788,36 +4961,42 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
{
int i;
- if (bp->tx_buf_ring == NULL)
- return;
-
- for (i = 0; i < TX_DESC_CNT; ) {
- struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
- struct sk_buff *skb = tx_buf->skb;
- int j, last;
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ int j;
- if (skb == NULL) {
- i++;
+ if (txr->tx_buf_ring == NULL)
continue;
- }
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ for (j = 0; j < TX_DESC_CNT; ) {
+ struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (skb == NULL) {
+ j++;
+ continue;
+ }
+
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
- tx_buf->skb = NULL;
+ tx_buf->skb = NULL;
- last = skb_shinfo(skb)->nr_frags;
- for (j = 0; j < last; j++) {
- tx_buf = &bp->tx_buf_ring[i + j + 1];
- pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[j].size,
- PCI_DMA_TODEVICE);
+ last = skb_shinfo(skb)->nr_frags;
+ for (k = 0; k < last; k++) {
+ tx_buf = &txr->tx_buf_ring[j + k + 1];
+ pci_unmap_page(bp->pdev,
+ pci_unmap_addr(tx_buf, mapping),
+ skb_shinfo(skb)->frags[k].size,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ j += k + 1;
}
- dev_kfree_skb(skb);
- i += j + 1;
}
-
}
static void
@@ -4825,25 +5004,33 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
{
int i;
- if (bp->rx_buf_ring == NULL)
- return;
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
- for (i = 0; i < bp->rx_max_ring_idx; i++) {
- struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ if (rxr->rx_buf_ring == NULL)
+ return;
- if (skb == NULL)
- continue;
+ for (j = 0; j < bp->rx_max_ring_idx; j++) {
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+ struct sk_buff *skb = rx_buf->skb;
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+ if (skb == NULL)
+ continue;
- rx_buf->skb = NULL;
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
- dev_kfree_skb(skb);
+ rx_buf->skb = NULL;
+
+ dev_kfree_skb(skb);
+ }
+ for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+ bnx2_free_rx_page(bp, rxr, j);
}
- for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
- bnx2_free_rx_page(bp, i);
}
static void
@@ -4866,14 +5053,12 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
if ((rc = bnx2_init_chip(bp)) != 0)
return rc;
- bnx2_clear_ring_states(bp);
- bnx2_init_tx_ring(bp);
- bnx2_init_rx_ring(bp);
+ bnx2_init_all_rings(bp);
return 0;
}
static int
-bnx2_init_nic(struct bnx2 *bp)
+bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
int rc;
@@ -4881,7 +5066,7 @@ bnx2_init_nic(struct bnx2 *bp)
return rc;
spin_lock_bh(&bp->phy_lock);
- bnx2_init_phy(bp);
+ bnx2_init_phy(bp, reset_phy);
bnx2_set_link(bp);
if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
bnx2_remote_phy_event(bp);
@@ -5141,11 +5326,13 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
tx_napi = bnapi;
- if (bp->flags & BNX2_FLAG_USING_MSIX)
- tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
+ txr = &tx_napi->tx_ring;
+ rxr = &bnapi->rx_ring;
if (loopback_mode == BNX2_MAC_LOOPBACK) {
bp->loopback = MAC_LOOPBACK;
bnx2_set_mac_loopback(bp);
@@ -5183,7 +5370,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
num_pkts = 0;
- txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
+ txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
txbd->tx_bd_haddr_hi = (u64) map >> 32;
txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5191,11 +5378,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
num_pkts++;
- bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
- bp->tx_prod_bseq += pkt_size;
+ txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+ txr->tx_prod_bseq += pkt_size;
- REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
- REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+ REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
udelay(100);
@@ -5209,7 +5396,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
+ if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
goto loopback_test_done;
rx_idx = bnx2_get_hw_rx_cons(bnapi);
@@ -5217,11 +5404,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
goto loopback_test_done;
}
- rx_buf = &bp->rx_buf_ring[rx_start_idx];
+ rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
rx_hdr = (struct l2_fhdr *) rx_skb->data;
- skb_reserve(rx_skb, bp->rx_offset);
+ skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
pci_unmap_addr(rx_buf, mapping),
@@ -5269,7 +5456,7 @@ bnx2_test_loopback(struct bnx2 *bp)
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
spin_lock_bh(&bp->phy_lock);
- bnx2_init_phy(bp);
+ bnx2_init_phy(bp, 1);
spin_unlock_bh(&bp->phy_lock);
if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
rc |= BNX2_MAC_LOOPBACK_FAILED;
@@ -5531,7 +5718,6 @@ bnx2_restart_timer:
static int
bnx2_request_irq(struct bnx2 *bp)
{
- struct net_device *dev = bp->dev;
unsigned long flags;
struct bnx2_irq *irq;
int rc = 0, i;
@@ -5544,7 +5730,7 @@ bnx2_request_irq(struct bnx2 *bp)
for (i = 0; i < bp->irq_nvecs; i++) {
irq = &bp->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
- dev);
+ &bp->bnx2_napi[i]);
if (rc)
break;
irq->requested = 1;
@@ -5555,14 +5741,13 @@ bnx2_request_irq(struct bnx2 *bp)
static void
bnx2_free_irq(struct bnx2 *bp)
{
- struct net_device *dev = bp->dev;
struct bnx2_irq *irq;
int i;
for (i = 0; i < bp->irq_nvecs; i++) {
irq = &bp->irq_tbl[i];
if (irq->requested)
- free_irq(irq->vector, dev);
+ free_irq(irq->vector, &bp->bnx2_napi[i]);
irq->requested = 0;
}
if (bp->flags & BNX2_FLAG_USING_MSI)
@@ -5574,7 +5759,7 @@ bnx2_free_irq(struct bnx2 *bp)
}
static void
-bnx2_enable_msix(struct bnx2 *bp)
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
int i, rc;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
@@ -5587,21 +5772,16 @@ bnx2_enable_msix(struct bnx2 *bp)
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
+
+ strcpy(bp->irq_tbl[i].name, bp->dev->name);
+ bp->irq_tbl[i].handler = bnx2_msi_1shot;
}
rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
if (rc != 0)
return;
- bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
- bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
-
- strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
- strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
- strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
- strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
-
- bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
+ bp->irq_nvecs = msix_vecs;
bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
bp->irq_tbl[i].vector = msix_ent[i].vector;
@@ -5610,13 +5790,16 @@ bnx2_enable_msix(struct bnx2 *bp)
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
+ int cpus = num_online_cpus();
+ int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+
bp->irq_tbl[0].handler = bnx2_interrupt;
strcpy(bp->irq_tbl[0].name, bp->dev->name);
bp->irq_nvecs = 1;
bp->irq_tbl[0].vector = bp->pdev->irq;
- if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
- bnx2_enable_msix(bp);
+ if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
+ bnx2_enable_msix(bp, msix_vecs);
if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
!(bp->flags & BNX2_FLAG_USING_MSIX)) {
@@ -5631,6 +5814,11 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
bp->irq_tbl[0].vector = bp->pdev->irq;
}
}
+
+ bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+ bp->dev->real_num_tx_queues = bp->num_tx_rings;
+
+ bp->num_rx_rings = bp->irq_nvecs;
}
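
A standalone sketch of the vector-to-ring mapping set up above: the tx ring count is rounded down to a power of two, while rx rings simply track the vector count. rounddown_pow_of_two() is open-coded here so the example builds on its own:

#include <stdio.h>

static unsigned int rounddown_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned int nvecs;

        for (nvecs = 1; nvecs <= 8; nvecs++)
                printf("irq_nvecs %u -> tx rings %u, rx rings %u\n",
                       nvecs, rounddown_pow2(nvecs), nvecs);
        return 0;
}
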
/* Called with rtnl_lock */
@@ -5645,29 +5833,19 @@ bnx2_open(struct net_device *dev)
bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
+ bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
- return rc;
+ goto open_err;
- bnx2_setup_int_mode(bp, disable_msi);
- bnx2_napi_enable(bp);
rc = bnx2_request_irq(bp);
+ if (rc)
+ goto open_err;
- if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_mem(bp);
- return rc;
- }
-
- rc = bnx2_init_nic(bp);
-
- if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_irq(bp);
- bnx2_free_skbs(bp);
- bnx2_free_mem(bp);
- return rc;
- }
+ rc = bnx2_init_nic(bp, 1);
+ if (rc)
+ goto open_err;
mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -5691,17 +5869,14 @@ bnx2_open(struct net_device *dev)
bnx2_setup_int_mode(bp, 1);
- rc = bnx2_init_nic(bp);
+ rc = bnx2_init_nic(bp, 0);
if (!rc)
rc = bnx2_request_irq(bp);
if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_skbs(bp);
- bnx2_free_mem(bp);
del_timer_sync(&bp->timer);
- return rc;
+ goto open_err;
}
bnx2_enable_int(bp);
}
@@ -5711,9 +5886,16 @@ bnx2_open(struct net_device *dev)
else if (bp->flags & BNX2_FLAG_USING_MSIX)
printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
+
+open_err:
+ bnx2_napi_disable(bp);
+ bnx2_free_skbs(bp);
+ bnx2_free_irq(bp);
+ bnx2_free_mem(bp);
+ return rc;
}
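
A userspace sketch of the single-label unwind idiom bnx2_open() now uses above: every failure jumps to one open_err label that releases whatever was acquired so far, relying on the teardown calls being safe on not-yet-acquired resources (here, free(NULL) is a no-op). The resource names are placeholders:

#include <stdio.h>
#include <stdlib.h>

static int open_resources(void)
{
        void *mem = NULL, *irq = NULL;

        mem = malloc(64);               /* stands in for bnx2_alloc_mem() */
        if (!mem)
                goto open_err;
        irq = malloc(64);               /* stands in for bnx2_request_irq() */
        if (!irq)
                goto open_err;
        /* Success: resources intentionally stay allocated, as the device
         * keeps them while it is open. */
        return 0;

open_err:
        free(irq);
        free(mem);
        return -1;
}

int main(void)
{
        return open_resources() ? 1 : 0;
}
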
static void
@@ -5726,7 +5908,7 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
bnx2_netif_start(bp);
@@ -5752,6 +5934,8 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
bp->vlgrp = vlgrp;
bnx2_set_rx_mode(dev);
+ if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
bnx2_netif_start(bp);
}
@@ -5771,18 +5955,26 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 len, vlan_tag_flags, last_frag, mss;
u16 prod, ring_prod;
int i;
- struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
+ struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct netdev_queue *txq;
- if (unlikely(bnx2_tx_avail(bp, bnapi) <
+ /* Determine which tx ring we will be placed on */
+ i = skb_get_queue_mapping(skb);
+ bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+
+ if (unlikely(bnx2_tx_avail(bp, txr) <
(skb_shinfo(skb)->nr_frags + 1))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
dev->name);
return NETDEV_TX_BUSY;
}
len = skb_headlen(skb);
- prod = bp->tx_prod;
+ prod = txr->tx_prod;
ring_prod = TX_RING_IDX(prod);
vlan_tag_flags = 0;
@@ -5790,10 +5982,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
}
+#ifdef BCM_VLAN
if (bp->vlgrp && vlan_tx_tag_present(skb)) {
vlan_tag_flags |=
(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
}
+#endif
if ((mss = skb_shinfo(skb)->gso_size)) {
u32 tcp_opt_len, ip_tcp_len;
struct iphdr *iph;
@@ -5844,11 +6038,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- tx_buf = &bp->tx_buf_ring[ring_prod];
+ tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
pci_unmap_addr_set(tx_buf, mapping, mapping);
- txbd = &bp->tx_desc_ring[ring_prod];
+ txbd = &txr->tx_desc_ring[ring_prod];
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -5862,12 +6056,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
- txbd = &bp->tx_desc_ring[ring_prod];
+ txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
+ pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
mapping, mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -5879,20 +6073,20 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
prod = NEXT_TX_BD(prod);
- bp->tx_prod_bseq += skb->len;
+ txr->tx_prod_bseq += skb->len;
- REG_WR16(bp, bp->tx_bidx_addr, prod);
- REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+ REG_WR16(bp, txr->tx_bidx_addr, prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
mmiowb();
- bp->tx_prod = prod;
+ txr->tx_prod = prod;
dev->trans_start = jiffies;
- if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
- netif_stop_queue(dev);
- if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
- netif_wake_queue(dev);
+ if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
+ netif_tx_stop_queue(txq);
+ if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
}
return NETDEV_TX_OK;
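
A userspace sketch of the per-queue stop/wake sequence at the end of the xmit path above: stop the queue when a worst-case packet (one head plus MAX_SKB_FRAGS descriptors) may no longer fit, then re-read the free count, since the completion path may have freed descriptors between the first check and the stop. The numbers below are made up, and MAX_SKB_FRAGS is illustrative:

#include <stdio.h>

#define MAX_SKB_FRAGS   18      /* illustrative; 65536/PAGE_SIZE + 2 on 4K pages */

int main(void)
{
        int tx_wake_thresh = 32;
        int avail = 15;                 /* first bnx2_tx_avail() read */

        if (avail <= MAX_SKB_FRAGS) {
                printf("queue stopped (avail=%d)\n", avail);
                avail = 40;             /* second read: completions ran meanwhile */
                if (avail > tx_wake_thresh)
                        printf("raced with completion: queue re-woken\n");
        }
        return 0;
}
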
@@ -6095,6 +6289,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
!(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
goto err_out_unlock;
+ /* If device is down, we can store the settings only if the user
+ * is setting the currently active port.
+ */
+ if (!netif_running(dev) && cmd->port != bp->phy_port)
+ goto err_out_unlock;
+
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
@@ -6152,7 +6352,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->req_line_speed = req_line_speed;
bp->req_duplex = req_duplex;
- err = bnx2_setup_phy(bp, cmd->port);
+ err = 0;
+ /* If device is down, the new settings will be picked up when it is
+ * brought up.
+ */
+ if (netif_running(dev))
+ err = bnx2_setup_phy(bp, cmd->port);
err_out_unlock:
spin_unlock_bh(&bp->phy_lock);
@@ -6414,7 +6619,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
}
@@ -6457,7 +6662,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
rc = bnx2_alloc_mem(bp);
if (rc)
return rc;
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
}
return 0;
@@ -6725,7 +6930,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
}
else {
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
}
@@ -6951,7 +7156,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
return 0;
}
@@ -7108,6 +7313,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
}
pci_set_master(pdev);
+ pci_save_state(pdev);
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
@@ -7125,7 +7331,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
- mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
@@ -7272,7 +7478,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg &= BNX2_CONDITION_MFW_RUN_MASK;
if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
reg != BNX2_CONDITION_MFW_RUN_NONE) {
- int i;
u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
bp->fw_version[j++] = ' ';
@@ -7294,8 +7499,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[4] = (u8) (reg >> 8);
bp->mac_addr[5] = (u8) reg;
- bp->rx_offset = sizeof(struct l2_fhdr) + 2;
-
bp->tx_ring_size = MAX_TX_DESC_CNT;
bnx2_set_rx_ring_size(bp, 255);
@@ -7345,8 +7548,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
}
- bnx2_init_remote_phy(bp);
-
} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
CHIP_NUM(bp) == CHIP_NUM_5708)
bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
@@ -7355,6 +7556,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
CHIP_REV(bp) == CHIP_REV_Bx))
bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
+ bnx2_init_fw_cap(bp);
+
if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
(CHIP_ID(bp) == CHIP_ID_5708_B0) ||
(CHIP_ID(bp) == CHIP_ID_5708_B1)) {
@@ -7451,15 +7654,19 @@ static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
int i;
- struct bnx2_napi *bnapi;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
- bnapi = &bp->bnx2_napi[i];
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ int (*poll)(struct napi_struct *, int);
+
+ if (i == 0)
+ poll = bnx2_poll;
+ else
+ poll = bnx2_poll_msix;
+
+ netif_napi_add(bp->dev, &bnapi->napi, poll, 64);
bnapi->bp = bp;
}
- netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
- netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
- 64);
}
static int __devinit
@@ -7476,7 +7683,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO "%s", version);
/* dev zeroed in init_etherdev */
- dev = alloc_etherdev(sizeof(*bp));
+ dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
if (!dev)
return -ENOMEM;
@@ -7491,7 +7698,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hard_start_xmit = bnx2_start_xmit;
dev->stop = bnx2_close;
dev->get_stats = bnx2_get_stats;
- dev->set_multicast_list = bnx2_set_rx_mode;
+ dev->set_rx_mode = bnx2_set_rx_mode;
dev->do_ioctl = bnx2_ioctl;
dev->set_mac_address = bnx2_change_mac_addr;
dev->change_mtu = bnx2_change_mtu;
@@ -7612,11 +7819,97 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
return 0;
}
+/**
+ * bnx2_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev)) {
+ bnx2_netif_stop(bp);
+ del_timer_sync(&bp->timer);
+ bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+ }
+
+ pci_disable_device(pdev);
+ rtnl_unlock();
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot.
+ */
+static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev)) {
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_init_nic(bp, 1);
+ }
+
+ rtnl_unlock();
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_running(dev))
+ bnx2_netif_start(bp);
+
+ netif_device_attach(dev);
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2_err_handler = {
+ .error_detected = bnx2_io_error_detected,
+ .slot_reset = bnx2_io_slot_reset,
+ .resume = bnx2_io_resume,
+};
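+
A small sketch of the order in which the PCI error-recovery core is expected to invoke the handlers registered above: error_detected() first (here returning NEED_RESET), then slot_reset(), then resume() once traffic may flow again:

#include <stdio.h>

int main(void)
{
        const char *seq[] = {
                "error_detected() -> PCI_ERS_RESULT_NEED_RESET",
                "slot_reset()     -> PCI_ERS_RESULT_RECOVERED",
                "resume()",
        };
        int i;

        for (i = 0; i < 3; i++)
                printf("step %d: %s\n", i + 1, seq[i]);
        return 0;
}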
+
static struct pci_driver bnx2_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2_pci_tbl,
@@ -7624,6 +7917,7 @@ static struct pci_driver bnx2_pci_driver = {
.remove = __devexit_p(bnx2_remove_one),
.suspend = bnx2_suspend,
.resume = bnx2_resume,
+ .err_handler = &bnx2_err_handler,
};
static int __init bnx2_init(void)