Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h          161
-rw-r--r--  drivers/net/qlge/qlge_dbg.c        9
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c    4
-rw-r--r--  drivers/net/qlge/qlge_main.c     630
-rw-r--r--  drivers/net/qlge/qlge_mpi.c      835
5 files changed, 1270 insertions(+), 369 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index aff9c5fec73..fcb159e4df5 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,8 +28,8 @@
} while (0)
#define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID 0x8012
-
+#define QLGE_DEVICE_ID_8012 0x8012
+#define QLGE_DEVICE_ID_8000 0x8000
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -164,7 +164,7 @@ enum {
CSR_RP = (1 << 10),
CSR_CMD_PARM_SHIFT = 22,
CSR_CMD_NOP = 0x00000000,
- CSR_CMD_SET_RST = 0x1000000,
+ CSR_CMD_SET_RST = 0x10000000,
CSR_CMD_CLR_RST = 0x20000000,
CSR_CMD_SET_PAUSE = 0x30000000,
CSR_CMD_CLR_PAUSE = 0x40000000,
@@ -424,7 +424,7 @@ enum {
RX_SYMBOL_ERR = 0x00000370,
RX_MAC_ERR = 0x00000378,
RX_CTL_PKTS = 0x00000380,
- RX_PAUSE_PKTS = 0x00000384,
+ RX_PAUSE_PKTS = 0x00000388,
RX_64_PKTS = 0x00000390,
RX_65_TO_127_PKTS = 0x00000398,
RX_128_255_PKTS = 0x000003a0,
@@ -733,6 +733,11 @@ enum {
AEN_LINK_DOWN = 0x00008012,
AEN_IDC_CMPLT = 0x00008100,
AEN_IDC_REQ = 0x00008101,
+ AEN_IDC_EXT = 0x00008102,
+ AEN_DCBX_CHG = 0x00008110,
+ AEN_AEN_LOST = 0x00008120,
+ AEN_AEN_SFP_IN = 0x00008130,
+ AEN_AEN_SFP_OUT = 0x00008131,
AEN_FW_INIT_DONE = 0x00008400,
AEN_FW_INIT_FAIL = 0x00008401,
@@ -742,40 +747,48 @@ enum {
MB_CMD_MB_TEST = 0x00000006,
MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
MB_CMD_ABOUT_FW = 0x00000008,
+ MB_CMD_COPY_RISC_RAM = 0x0000000a,
MB_CMD_LOAD_RISC_RAM = 0x0000000b,
MB_CMD_DUMP_RISC_RAM = 0x0000000c,
MB_CMD_WRITE_RAM = 0x0000000d,
+ MB_CMD_INIT_RISC_RAM = 0x0000000e,
MB_CMD_READ_RAM = 0x0000000f,
MB_CMD_STOP_FW = 0x00000014,
MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+ MB_CMD_WRITE_SFP = 0x00000030,
+ MB_CMD_READ_SFP = 0x00000031,
MB_CMD_INIT_FW = 0x00000060,
- MB_CMD_GET_INIT_CB = 0x00000061,
+ MB_CMD_GET_IFCB = 0x00000061,
MB_CMD_GET_FW_STATE = 0x00000069,
MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
- MB_WOL_DISABLE = 0x00000000,
- MB_WOL_MAGIC_PKT = 0x00000001,
- MB_WOL_FLTR = 0x00000002,
- MB_WOL_UCAST = 0x00000004,
- MB_WOL_MCAST = 0x00000008,
- MB_WOL_BCAST = 0x00000010,
- MB_WOL_LINK_UP = 0x00000020,
- MB_WOL_LINK_DOWN = 0x00000040,
+ MB_WOL_DISABLE = 0,
+ MB_WOL_MAGIC_PKT = (1 << 1),
+ MB_WOL_FLTR = (1 << 2),
+ MB_WOL_UCAST = (1 << 3),
+ MB_WOL_MCAST = (1 << 4),
+ MB_WOL_BCAST = (1 << 5),
+ MB_WOL_LINK_UP = (1 << 6),
+ MB_WOL_LINK_DOWN = (1 << 7),
MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
- MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
+ MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
- MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */
+ MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
+ MB_CMD_SET_WOL_IMMED = 0x00000115,
MB_CMD_PORT_RESET = 0x00000120,
MB_CMD_SET_PORT_CFG = 0x00000122,
MB_CMD_GET_PORT_CFG = 0x00000123,
- MB_CMD_SET_ASIC_VOLTS = 0x00000130,
- MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */
+ MB_CMD_GET_LINK_STS = 0x00000124,
/* Mailbox Command Status. */
MB_CMD_STS_GOOD = 0x00004000, /* Success. */
MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
- MB_CMD_STS_ERR = 0x00004005, /* Error. */
+ MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
+ MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
+ MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
+ MB_CMD_STS_ERR = 0x00004005, /* System Error. */
+ MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
};
struct mbox_params {
@@ -785,7 +798,7 @@ struct mbox_params {
int out_count;
};
-struct flash_params {
+struct flash_params_8012 {
u8 dev_id_str[4];
__le16 size;
__le16 csum;
@@ -795,6 +808,43 @@ struct flash_params {
__le16 res;
};
+/* 8000 device's flash is a different structure
+ * at a different offset in flash.
+ */
+#define FUNC0_FLASH_OFFSET 0x140200
+#define FUNC1_FLASH_OFFSET 0x140600
+
+/* Flash related data structures. */
+struct flash_params_8000 {
+ u8 dev_id_str[4]; /* "8000" */
+ __le16 ver;
+ __le16 size;
+ __le16 csum;
+ __le16 reserved0;
+ __le16 total_size;
+ __le16 entry_count;
+ u8 data_type0;
+ u8 data_size0;
+ u8 mac_addr[6];
+ u8 data_type1;
+ u8 data_size1;
+ u8 mac_addr1[6];
+ u8 data_type2;
+ u8 data_size2;
+ __le16 vlan_id;
+ u8 data_type3;
+ u8 data_size3;
+ __le16 last;
+ u8 reserved1[464];
+ __le16 subsys_ven_id;
+ __le16 subsys_dev_id;
+ u8 reserved2[4];
+};
+
+union flash_params {
+ struct flash_params_8012 flash_params_8012;
+ struct flash_params_8000 flash_params_8000;
+};
/*
* doorbell space for the rx ring context
@@ -968,6 +1018,7 @@ struct ib_mac_iocb_rsp {
__le16 vlan_id; /* 12 bits */
#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
+#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
__le16 reserved1;
__le32 reserved2[6];
@@ -1033,6 +1084,7 @@ struct wqicb {
#define Q_LEN_CPP_16 0x0001
#define Q_LEN_CPP_32 0x0002
#define Q_LEN_CPP_64 0x0003
+#define Q_LEN_CPP_512 0x0006
__le16 flags;
#define Q_PRI_SHIFT 1
#define Q_FLAGS_LC 0x1000
@@ -1314,27 +1366,49 @@ enum {
QL_DMA64 = (1 << 5),
QL_PROMISCUOUS = (1 << 6),
QL_ALLMULTI = (1 << 7),
+ QL_PORT_CFG = (1 << 8),
+ QL_CAM_RT_SET = (1 << 9),
};
/* link_status bit definitions */
enum {
- LOOPBACK_MASK = 0x00000700,
- LOOPBACK_PCS = 0x00000100,
- LOOPBACK_HSS = 0x00000200,
- LOOPBACK_EXT = 0x00000300,
- PAUSE_MASK = 0x000000c0,
- PAUSE_STD = 0x00000040,
- PAUSE_PRI = 0x00000080,
- SPEED_MASK = 0x00000038,
- SPEED_100Mb = 0x00000000,
- SPEED_1Gb = 0x00000008,
- SPEED_10Gb = 0x00000010,
- LINK_TYPE_MASK = 0x00000007,
- LINK_TYPE_XFI = 0x00000001,
- LINK_TYPE_XAUI = 0x00000002,
- LINK_TYPE_XFI_BP = 0x00000003,
- LINK_TYPE_XAUI_BP = 0x00000004,
- LINK_TYPE_10GBASET = 0x00000005,
+ STS_LOOPBACK_MASK = 0x00000700,
+ STS_LOOPBACK_PCS = 0x00000100,
+ STS_LOOPBACK_HSS = 0x00000200,
+ STS_LOOPBACK_EXT = 0x00000300,
+ STS_PAUSE_MASK = 0x000000c0,
+ STS_PAUSE_STD = 0x00000040,
+ STS_PAUSE_PRI = 0x00000080,
+ STS_SPEED_MASK = 0x00000038,
+ STS_SPEED_100Mb = 0x00000000,
+ STS_SPEED_1Gb = 0x00000008,
+ STS_SPEED_10Gb = 0x00000010,
+ STS_LINK_TYPE_MASK = 0x00000007,
+ STS_LINK_TYPE_XFI = 0x00000001,
+ STS_LINK_TYPE_XAUI = 0x00000002,
+ STS_LINK_TYPE_XFI_BP = 0x00000003,
+ STS_LINK_TYPE_XAUI_BP = 0x00000004,
+ STS_LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/* link_config bit definitions */
+enum {
+ CFG_JUMBO_FRAME_SIZE = 0x00010000,
+ CFG_PAUSE_MASK = 0x00000060,
+ CFG_PAUSE_STD = 0x00000020,
+ CFG_PAUSE_PRI = 0x00000040,
+ CFG_DCBX = 0x00000010,
+ CFG_LOOPBACK_MASK = 0x00000007,
+ CFG_LOOPBACK_PCS = 0x00000002,
+ CFG_LOOPBACK_HSS = 0x00000004,
+ CFG_LOOPBACK_EXT = 0x00000006,
+ CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+struct nic_operations {
+
+ int (*get_flash) (struct ql_adapter *);
+ int (*port_initialize) (struct ql_adapter *);
};
/*
@@ -1377,6 +1451,8 @@ struct ql_adapter {
u32 mailbox_in;
u32 mailbox_out;
+ struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
@@ -1412,8 +1488,10 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
+ u32 link_config;
+ u32 max_frame_size;
- struct flash_params flash;
+ union flash_params flash;
struct net_device_stats stats;
struct workqueue_struct *q_workqueue;
@@ -1421,6 +1499,11 @@ struct ql_adapter {
struct delayed_work asic_reset_work;
struct delayed_work mpi_reset_work;
struct delayed_work mpi_work;
+ struct delayed_work mpi_port_cfg_work;
+ struct delayed_work mpi_idc_work;
+ struct completion ide_completion;
+ struct nic_operations *nic_ops;
+ u16 device_id;
};
/*
@@ -1493,6 +1576,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+void ql_mpi_idc_work(struct work_struct *work);
+void ql_mpi_port_cfg_work(struct work_struct *work);
+int ql_mb_get_fw_state(struct ql_adapter *qdev);
+int ql_cam_route_initialize(struct ql_adapter *qdev);
#if 1
#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 379b895ed6e..40a70c36f5a 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -83,6 +83,10 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
{
int i;
u32 value[3];
+
+ i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (i)
+ return;
for (i = 0; i < 4; i++) {
if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
printk(KERN_ERR PFX
@@ -111,12 +115,16 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
qdev->ndev->name, i, value[1], value[0]);
}
}
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
void ql_dump_routing_entries(struct ql_adapter *qdev)
{
int i;
u32 value;
+ i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (i)
+ return;
for (i = 0; i < 16; i++) {
value = 0;
if (ql_get_routing_reg(qdev, i, &value)) {
@@ -131,6 +139,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
qdev->ndev->name, i, value);
}
}
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
void ql_dump_regs(struct ql_adapter *qdev)
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 9d922e2ff22..913b2a5fafc 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -33,7 +33,6 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include "qlge.h"
@@ -271,7 +270,8 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->advertising = ADVERTISED_10000baseT_Full;
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
- if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
+ if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
+ STS_LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 91191f761fb..170d3540f9c 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -58,8 +58,8 @@ static const u32 default_msg =
NETIF_MSG_IFUP |
NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR |
- NETIF_MSG_TX_QUEUED |
- NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
+/* NETIF_MSG_TX_QUEUED | */
+/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
@@ -75,7 +75,8 @@ module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
{0,}
};
@@ -247,9 +248,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
u32 offset = 0;
int status;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC:
@@ -308,7 +306,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
status = -EPERM;
}
exit:
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -321,9 +318,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u32 offset = 0;
int status = 0;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC:
@@ -334,7 +328,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- QPRINTK(qdev, IFUP, INFO,
+ QPRINTK(qdev, IFUP, DEBUG,
"Adding %s address %pM"
" at index %d in the CAM.\n",
((type ==
@@ -415,7 +409,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
status = -EPERM;
}
exit:
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -426,10 +419,6 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
int status = 0;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- goto exit;
-
status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
@@ -441,7 +430,6 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
goto exit;
*value = ql_read32(qdev, RT_DATA);
exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
@@ -453,13 +441,9 @@ exit:
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int enable)
{
- int status;
+ int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
QPRINTK(qdev, IFUP, DEBUG,
"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
(enable ? "Adding" : "Removing"),
@@ -555,7 +539,6 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
ql_write32(qdev, RT_DATA, enable ? mask : 0);
}
exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
@@ -604,7 +587,6 @@ u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
- unsigned long hw_flags;
struct intr_context *ctx;
/* HW disables for us if we're MSIX multi interrupts and
@@ -614,14 +596,14 @@ static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
return 0;
ctx = qdev->intr_context + intr;
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ spin_lock(&qdev->hw_lock);
if (!atomic_read(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
ctx->intr_dis_mask);
var = ql_read32(qdev, STS);
}
atomic_inc(&ctx->irq_cnt);
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ spin_unlock(&qdev->hw_lock);
return var;
}
@@ -641,6 +623,28 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
}
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+ int status, i;
+ u16 csum = 0;
+ __le16 *flash = (__le16 *)&qdev->flash;
+
+ status = strncmp((char *)&qdev->flash, str, 4);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+ return status;
+ }
+
+ for (i = 0; i < size; i++)
+ csum += le16_to_cpu(*flash++);
+
+ if (csum)
+ QPRINTK(qdev, IFUP, ERR,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+ return csum;
+}
+
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
int status = 0;
@@ -665,23 +669,75 @@ exit:
return status;
}
-static int ql_get_flash_params(struct ql_adapter *qdev)
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+ u32 i, size;
+ int status;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset;
+
+ /* Get flash offset for function and adjust
+ * for dword access.
+ */
+ if (!qdev->func)
+ offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+ else
+ offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+ if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ return -ETIMEDOUT;
+
+ size = sizeof(struct flash_params_8000) / sizeof(u32);
+ for (i = 0; i < size; i++, p++) {
+ status = ql_read_flash_word(qdev, i+offset, p);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ goto exit;
+ }
+ }
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8000) / sizeof(u16),
+ "8000");
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (!is_valid_ether_addr(qdev->flash.flash_params_8000.mac_addr)) {
+ QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
+
+exit:
+ ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
int i;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset = 0;
+ u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
/* Second function's parameters follow the first
* function's.
*/
if (qdev->func)
- offset = sizeof(qdev->flash) / sizeof(u32);
+ offset = size;
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
- for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
+ for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
@@ -689,6 +745,25 @@ static int ql_get_flash_params(struct ql_adapter *qdev)
}
}
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8012) / sizeof(u16),
+ "8012");
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ qdev->flash.flash_params_8012.mac_addr,
+ qdev->ndev->addr_len);
+
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
@@ -759,13 +834,25 @@ exit:
return status;
}
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_mb_get_fw_state(qdev);
+ if (status)
+ goto exit;
+ /* Wake up a worker to get/set the TX/RX frame sizes. */
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+ return status;
+}
+
/* Take the MAC Core out of reset.
* Enable statistics counting.
* Take the transmitter/receiver out of reset.
* This functionality may be done in the MPI firmware at a
* later date.
*/
-static int ql_port_initialize(struct ql_adapter *qdev)
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
int status = 0;
u32 data;
@@ -881,7 +968,8 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- int clean_idx = rx_ring->lbq_clean_idx;
+ u32 clean_idx = rx_ring->lbq_clean_idx;
+ u32 start_idx = clean_idx;
struct bq_desc *lbq_desc;
u64 map;
int i;
@@ -928,19 +1016,23 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->lbq_prod_idx += 16;
if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
QPRINTK(qdev, RX_STATUS, DEBUG,
"lbq: updating prod idx = %d.\n",
rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
- rx_ring->lbq_free_cnt -= 16;
}
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- int clean_idx = rx_ring->sbq_clean_idx;
+ u32 clean_idx = rx_ring->sbq_clean_idx;
+ u32 start_idx = clean_idx;
struct bq_desc *sbq_desc;
u64 map;
int i;
@@ -990,13 +1082,15 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq_prod_idx += 16;
if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
QPRINTK(qdev, RX_STATUS, DEBUG,
"sbq: updating prod idx = %d.\n",
rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
-
- rx_ring->sbq_free_cnt -= 16;
}
}
@@ -1412,6 +1506,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
+ u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
+			IB_MAC_IOCB_RSP_VLAN_MASK);
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
@@ -1463,18 +1559,26 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
}
+
qdev->stats.rx_packets++;
qdev->stats.rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Passing a VLAN packet upstream.\n");
- vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
- le16_to_cpu(ib_mac_rsp->vlan_id));
+ skb_record_rx_queue(skb,
+ rx_ring->cq_id - qdev->rss_ring_first_cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (vlan_id != 0))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
} else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Passing a normal packet upstream.\n");
- netif_receive_skb(skb);
+ if (qdev->vlgrp &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (vlan_id != 0))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
}
}
@@ -1521,14 +1625,12 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
- netif_stop_queue(qdev->ndev);
netif_carrier_off(qdev->ndev);
queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
void ql_queue_asic_error(struct ql_adapter *qdev)
{
- netif_stop_queue(qdev->ndev);
netif_carrier_off(qdev->ndev);
ql_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
@@ -1583,6 +1685,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
struct ob_mac_iocb_rsp *net_rsp = NULL;
int count = 0;
+ struct tx_ring *tx_ring;
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
@@ -1608,15 +1711,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
ql_write_cq_idx(rx_ring);
- if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
- struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+ tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+ if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
+ net_rsp != NULL) {
if (atomic_read(&tx_ring->queue_stopped) &&
(atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
*/
- netif_wake_queue(qdev->ndev);
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
}
return count;
@@ -1677,7 +1781,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
rx_ring->cq_id);
if (work_done < budget) {
- __netif_rx_complete(napi);
+ napi_complete(napi);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
@@ -1703,19 +1807,29 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
u32 enable_bit = MAC_ADDR_E;
+ int status;
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
spin_lock(&qdev->hw_lock);
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
}
spin_unlock(&qdev->hw_lock);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
u32 enable_bit = 0;
+ int status;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
spin_lock(&qdev->hw_lock);
if (ql_set_mac_addr_reg
@@ -1723,6 +1837,7 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
}
spin_unlock(&qdev->hw_lock);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -1762,7 +1877,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
struct rx_ring *rx_ring = dev_id;
- netif_rx_schedule(&rx_ring->napi);
+ napi_schedule(&rx_ring->napi);
return IRQ_HANDLED;
}
@@ -1848,7 +1963,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
&rx_ring->rx_work,
0);
else
- netif_rx_schedule(&rx_ring->napi);
+ napi_schedule(&rx_ring->napi);
work_done++;
}
}
@@ -1937,7 +2052,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+ u32 tx_ring_idx = (u32) skb->queue_mapping;
tx_ring = &qdev->tx_ring[tx_ring_idx];
@@ -1948,7 +2063,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
QPRINTK(qdev, TX_QUEUED, INFO,
"%s: shutting down tx queue %d du to lack of resources.\n",
__func__, tx_ring_idx);
- netif_stop_queue(ndev);
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
return NETDEV_TX_BUSY;
}
@@ -2029,6 +2144,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
+ memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
qdev->tx_ring_shadow_reg_area =
pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
@@ -2037,6 +2153,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
+ memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
return 0;
err_wqp_sh_area:
@@ -2121,47 +2238,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
}
-/*
- * Allocate and map a page for each element of the lbq.
- */
-static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *lbq_desc;
- u64 map;
- __le64 *bq = rx_ring->lbq_base;
-
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(lbq_desc));
- lbq_desc->addr = bq;
- lbq_desc->index = i;
- lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
- if (unlikely(!lbq_desc->p.lbq_page)) {
- QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
- goto mem_error;
- } else {
- map = pci_map_page(qdev->pdev,
- lbq_desc->p.lbq_page,
- 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR,
- "PCI mapping failed.\n");
- goto mem_error;
- }
- pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
- *lbq_desc->addr = cpu_to_le64(map);
- }
- bq++;
- }
- return 0;
-mem_error:
- ql_free_lbq_buffers(qdev, rx_ring);
- return -ENOMEM;
-}
-
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
@@ -2184,63 +2260,72 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
}
-/* Allocate and map an skb for each element of the sbq. */
-static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+ int i;
+ struct rx_ring *rx_ring;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->lbq)
+ ql_free_lbq_buffers(qdev, rx_ring);
+ if (rx_ring->sbq)
+ ql_free_sbq_buffers(qdev, rx_ring);
+ }
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->type != TX_Q)
+ ql_update_buffer_queues(qdev, rx_ring);
+ }
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *lbq_desc;
+ __le64 *bq = rx_ring->lbq_base;
+
+ memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+ for (i = 0; i < rx_ring->lbq_len; i++) {
+ lbq_desc = &rx_ring->lbq[i];
+ memset(lbq_desc, 0, sizeof(*lbq_desc));
+ lbq_desc->index = i;
+ lbq_desc->addr = bq;
+ bq++;
+ }
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
int i;
struct bq_desc *sbq_desc;
- struct sk_buff *skb;
- u64 map;
__le64 *bq = rx_ring->sbq_base;
+ memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(sbq_desc));
+ memset(sbq_desc, 0, sizeof(*sbq_desc));
sbq_desc->index = i;
sbq_desc->addr = bq;
- skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
- if (unlikely(!skb)) {
- /* Better luck next round */
- QPRINTK(qdev, IFUP, ERR,
- "small buff alloc failed for %d bytes at index %d.\n",
- rx_ring->sbq_buf_size, i);
- goto mem_err;
- }
- skb_reserve(skb, QLGE_SB_PAD);
- sbq_desc->p.skb = skb;
- /*
- * Map only half the buffer. Because the
- * other half may get some data copied to it
- * when the completion arrives.
- */
- map = pci_map_single(qdev->pdev,
- skb->data,
- rx_ring->sbq_buf_size / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
- goto mem_err;
- }
- pci_unmap_addr_set(sbq_desc, mapaddr, map);
- pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
- *sbq_desc->addr = cpu_to_le64(map);
bq++;
}
- return 0;
-mem_err:
- ql_free_sbq_buffers(qdev, rx_ring);
- return -ENOMEM;
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
- if (rx_ring->sbq_len)
- ql_free_sbq_buffers(qdev, rx_ring);
- if (rx_ring->lbq_len)
- ql_free_lbq_buffers(qdev, rx_ring);
-
/* Free the small buffer queue. */
if (rx_ring->sbq_base) {
pci_free_consistent(qdev->pdev,
@@ -2318,11 +2403,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
goto err_mem;
}
- if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer allocation failed.\n");
- goto err_mem;
- }
+ ql_init_sbq_ring(qdev, rx_ring);
}
if (rx_ring->lbq_len) {
@@ -2350,14 +2431,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
goto err_mem;
}
- /*
- * Allocate the buffers.
- */
- if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer allocation failed.\n");
- goto err_mem;
- }
+ ql_init_lbq_ring(qdev, rx_ring);
}
return 0;
@@ -2451,6 +2525,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u16 bq_len;
+ u64 tmp;
/* Set up the shadow registers for this ring. */
rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2496,7 +2571,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
FLAGS_LI; /* Load irq delay values */
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
+ tmp = (u64)rx_ring->lbq_base_dma;
+ *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2505,25 +2581,26 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
bq_len = (rx_ring->lbq_len == 65536) ? 0 :
(u16) rx_ring->lbq_len;
cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
+ rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
- rx_ring->lbq_free_cnt = 16;
+ rx_ring->lbq_clean_idx = 0;
+ rx_ring->lbq_free_cnt = rx_ring->lbq_len;
}
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
+ tmp = (u64)rx_ring->sbq_base_dma;
+ *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
- cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
+ cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
bq_len = (rx_ring->sbq_len == 65536) ? 0 :
(u16) rx_ring->sbq_len;
cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
+ rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
- rx_ring->sbq_free_cnt = 16;
+ rx_ring->sbq_clean_idx = 0;
+ rx_ring->sbq_free_cnt = rx_ring->sbq_len;
}
switch (rx_ring->type) {
case TX_Q:
@@ -2569,24 +2646,13 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
rx_ring->type);
}
- QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
+ QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
return err;
}
- QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
- /*
- * Advance the producer index for the buffer queues.
- */
- wmb();
- if (rx_ring->lbq_len)
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
- if (rx_ring->sbq_len)
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
return err;
}
@@ -2633,7 +2699,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
return err;
}
- QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
+ QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
return err;
}
@@ -2675,7 +2741,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
(qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
set_bit(QL_MSIX_ENABLED, &qdev->flags);
qdev->intr_count = qdev->rx_ring_count;
- QPRINTK(qdev, IFUP, INFO,
+ QPRINTK(qdev, IFUP, DEBUG,
"MSI-X Enabled, got %d vectors.\n",
qdev->intr_count);
return;
@@ -2802,11 +2868,11 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- QPRINTK(qdev, IFDOWN, ERR,
+ QPRINTK(qdev, IFDOWN, DEBUG,
"freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- QPRINTK(qdev, IFDOWN, ERR,
+ QPRINTK(qdev, IFDOWN, DEBUG,
"freeing msi interrupt %d.\n", i);
}
}
@@ -2837,7 +2903,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
i);
goto err_irq;
} else {
- QPRINTK(qdev, IFUP, INFO,
+ QPRINTK(qdev, IFUP, DEBUG,
"Hooked intr %d, queue type %s%s%s, with name %s.\n",
i,
qdev->rx_ring[i].type ==
@@ -2912,14 +2978,14 @@ static int ql_start_rss(struct ql_adapter *qdev)
get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
- QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
+ QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
return status;
}
- QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
+ QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
return status;
}
@@ -2929,13 +2995,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
int status = 0;
int i;
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to init routing register for CAM packets.\n");
- return status;
+ goto exit;
}
}
@@ -2943,13 +3013,13 @@ static int ql_route_initialize(struct ql_adapter *qdev)
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to init routing register for error packets.\n");
- return status;
+ goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to init routing register for broadcast packets.\n");
- return status;
+ goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
* routing block.
@@ -2960,17 +3030,39 @@ static int ql_route_initialize(struct ql_adapter *qdev)
if (status) {
QPRINTK(qdev, IFUP, ERR,
"Failed to init routing register for MATCH RSS packets.\n");
- return status;
+ goto exit;
}
}
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
- if (status) {
+ if (status)
QPRINTK(qdev, IFUP, ERR,
"Failed to init routing register for CAM packets.\n");
+exit:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+ int status;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
return status;
}
+
+ status = ql_route_initialize(qdev);
+ if (status)
+ QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+
return status;
}
@@ -3038,28 +3130,24 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
}
}
- status = ql_port_initialize(qdev);
- if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
- return status;
- }
+ /* Initialize the port and set the max framesize. */
+ status = qdev->nic_ops->port_initialize(qdev);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ return status;
+ }
- status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func);
+ /* Set up the MAC address and frame routing filter. */
+ status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
- return status;
- }
-
- status = ql_route_initialize(qdev);
- if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+ QPRINTK(qdev, IFUP, ERR,
+ "Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
- QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
+ QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
i);
napi_enable(&qdev->rx_ring[i].napi);
}
@@ -3071,36 +3159,23 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
static int ql_adapter_reset(struct ql_adapter *qdev)
{
u32 value;
- int max_wait_time;
int status = 0;
- int resetCnt = 0;
+ unsigned long end_jiffies = jiffies +
+ max((unsigned long)1, usecs_to_jiffies(30));
-#define MAX_RESET_CNT 1
-issueReset:
- resetCnt++;
- QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
- /* Wait for reset to complete. */
- max_wait_time = 3;
- QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
- max_wait_time);
+
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
break;
+ cpu_relax();
+ } while (time_before(jiffies, end_jiffies));
- ssleep(1);
- } while ((--max_wait_time));
if (value & RST_FO_FR) {
QPRINTK(qdev, IFDOWN, ERR,
- "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
- if (resetCnt < MAX_RESET_CNT)
- goto issueReset;
- }
- if (max_wait_time == 0) {
- status = -ETIMEDOUT;
- QPRINTK(qdev, IFDOWN, ERR,
"ETIMEOUT!!! errored out of resetting the chip!\n");
+ status = -ETIMEDOUT;
}
return status;
@@ -3123,12 +3198,10 @@ static void ql_display_dev_info(struct net_device *ndev)
static int ql_adapter_down(struct ql_adapter *qdev)
{
- struct net_device *ndev = qdev->ndev;
int i, status = 0;
struct rx_ring *rx_ring;
- netif_stop_queue(ndev);
- netif_carrier_off(ndev);
+ netif_carrier_off(qdev->ndev);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3137,6 +3210,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->asic_reset_work);
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
/* The default queue at index 0 is always processed in
* a workqueue.
@@ -3171,6 +3246,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
+ ql_free_rx_buffers(qdev);
+
spin_lock(&qdev->hw_lock);
status = ql_adapter_reset(qdev);
if (status)
@@ -3184,21 +3261,19 @@ static int ql_adapter_up(struct ql_adapter *qdev)
{
int err = 0;
- spin_lock(&qdev->hw_lock);
err = ql_adapter_initialize(qdev);
if (err) {
QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
spin_unlock(&qdev->hw_lock);
goto err_init;
}
- spin_unlock(&qdev->hw_lock);
set_bit(QL_ADAPTER_UP, &qdev->flags);
+ ql_alloc_rx_buffers(qdev);
+ if ((ql_read32(qdev, STS) & qdev->port_init))
+ netif_carrier_on(qdev->ndev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
- if ((ql_read32(qdev, STS) & qdev->port_init)) {
- netif_carrier_on(qdev->ndev);
- netif_start_queue(qdev->ndev);
- }
+ netif_tx_start_all_queues(qdev->ndev);
return 0;
err_init:
@@ -3206,28 +3281,6 @@ err_init:
return err;
}
-static int ql_cycle_adapter(struct ql_adapter *qdev)
-{
- int status;
-
- status = ql_adapter_down(qdev);
- if (status)
- goto error;
-
- status = ql_adapter_up(qdev);
- if (status)
- goto error;
-
- return status;
-error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device\n");
- rtnl_lock();
- dev_close(qdev->ndev);
- rtnl_unlock();
- return status;
-}
-
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
ql_free_mem_resources(qdev);
@@ -3308,6 +3361,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
* completion handler rx_rings.
*/
qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+ netif_set_gso_max_size(qdev->ndev, 65536);
for (i = 0; i < qdev->tx_ring_count; i++) {
tx_ring = &qdev->tx_ring[i];
@@ -3414,6 +3468,8 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
if (ndev->mtu == 1500 && new_mtu == 9000) {
QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 0);
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3436,8 +3492,11 @@ static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct dev_mc_list *mc_ptr;
- int i;
+ int i, status;
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return;
spin_lock(&qdev->hw_lock);
/*
* Set or clear promiscuous mode if a
@@ -3493,14 +3552,19 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
if (ndev->mc_count) {
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ goto exit;
for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
i++, mc_ptr = mc_ptr->next)
if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
QPRINTK(qdev, HW, ERR,
"Failed to loadmulticast address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
QPRINTK(qdev, HW, ERR,
@@ -3511,13 +3575,14 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
exit:
spin_unlock(&qdev->hw_lock);
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct sockaddr *addr = p;
- int ret = 0;
+ int status;
if (netif_running(ndev))
return -EBUSY;
@@ -3526,15 +3591,17 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
spin_lock(&qdev->hw_lock);
- if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
- QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
- ret = -1;
- }
+ status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
spin_unlock(&qdev->hw_lock);
-
- return ret;
+ if (status)
+ QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
@@ -3547,9 +3614,37 @@ static void ql_asic_reset_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, asic_reset_work.work);
- ql_cycle_adapter(qdev);
+ int status;
+
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ return;
+error:
+ QPRINTK(qdev, IFUP, ALERT,
+ "Driver up/down cycle failed, closing device\n");
+ rtnl_lock();
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ rtnl_unlock();
}
+static struct nic_operations qla8012_nic_ops = {
+ .get_flash = ql_get_8012_flash_params,
+ .port_initialize = ql_8012_port_initialize,
+};
+
+static struct nic_operations qla8000_nic_ops = {
+ .get_flash = ql_get_8000_flash_params,
+ .port_initialize = ql_8000_port_initialize,
+};
+
+
static void ql_get_board_info(struct ql_adapter *qdev)
{
qdev->func =
@@ -3568,6 +3663,11 @@ static void ql_get_board_info(struct ql_adapter *qdev)
qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
}
qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+ qdev->device_id = qdev->pdev->device;
+ if (qdev->device_id == QLGE_DEVICE_ID_8012)
+ qdev->nic_ops = &qla8012_nic_ops;
+ else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+ qdev->nic_ops = &qla8000_nic_ops;
}
static void ql_release_all(struct pci_dev *pdev)
@@ -3660,24 +3760,20 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
goto err_out;
}
- ql_get_board_info(qdev);
qdev->ndev = ndev;
qdev->pdev = pdev;
+ ql_get_board_info(qdev);
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
/* make sure the EEPROM is good */
- err = ql_get_flash_params(qdev);
+ err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
goto err_out;
}
- if (!is_valid_ether_addr(qdev->flash.mac_addr))
- goto err_out;
-
- memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
/* Set up the default ring sizes. */
@@ -3700,6 +3796,10 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+ INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+ INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ mutex_init(&qdev->mpi_mutex);
+ init_completion(&qdev->ide_completion);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
@@ -3737,7 +3837,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
static int cards_found = 0;
int err = 0;
- ndev = alloc_etherdev(sizeof(struct ql_adapter));
+ ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+ min(MAX_CPUS, (int)num_online_cpus()));
if (!ndev)
return -ENOMEM;
@@ -3757,6 +3858,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
| NETIF_F_TSO_ECN
| NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+ ndev->features |= NETIF_F_GRO;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
@@ -3779,7 +3881,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
return err;
}
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
ql_display_dev_info(ndev);
cards_found++;
return 0;
@@ -3833,7 +3934,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
ql_adapter_reset(qdev);
/* Make sure the EEPROM is good */
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index fa31891b6e6..9f81b797f10 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,6 +1,26 @@
#include "qlge.h"
-static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+static void ql_display_mb_sts(struct ql_adapter *qdev,
+ struct mbox_params *mbcp)
+{
+ int i;
+ static char *err_sts[] = {
+ "Command Complete",
+ "Command Not Supported",
+ "Host Interface Error",
+ "Checksum Error",
+ "Unused Completion Status",
+ "Test Failed",
+ "Command Parameter Error"};
+
+ QPRINTK(qdev, DRV, DEBUG, "%s.\n",
+ err_sts[mbcp->mbox_out[0] & 0x0000000f]);
+ for (i = 0; i < mbcp->out_count; i++)
+ QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
/* wait for reg to come ready */
@@ -19,6 +39,32 @@ exit:
return status;
}
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* write the data to the data reg */
+ ql_write32(qdev, PROC_DATA, data);
+ /* trigger the write */
+ ql_write32(qdev, PROC_ADDR, reg);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+exit:
+ return status;
+}
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+ return status;
+}
+
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -28,7 +74,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
return -EBUSY;
for (i = 0; i < mbcp->out_count; i++) {
status =
- ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
+ ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
@@ -39,102 +85,762 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+ int count = 50; /* TODO: arbitrary for now. */
+ u32 value;
+
+ do {
+ value = ql_read32(qdev, STS);
+ if (value & STS_PI)
+ return 0;
+ udelay(UDELAY_DELAY); /* 10us */
+ } while (--count);
+ return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int i, status;
+
+ /*
+ * Make sure there's nothing pending.
+ * This shouldn't happen.
+ */
+ if (ql_read32(qdev, CSR) & CSR_HRI)
+ return -EIO;
+
+ status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ if (status)
+ return status;
+
+ /*
+ * Fill the outbound mailboxes.
+ */
+ for (i = 0; i < mbcp->in_count; i++) {
+ status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+ mbcp->mbox_in[i]);
+ if (status)
+ goto end;
+ }
+ /*
+ * Wake up the MPI firmware.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+ return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode. We wake up a worker
+ * to handle processing this since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+
+ QPRINTK(qdev, DRV, ERR, "Enter!\n");
+ /* Get the status data and start up a thread to
+ * handle the request.
+ */
+ mbcp = &qdev->idc_mbc;
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ } else {
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+ }
+ return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Could not read MPI, resetting RISC!\n");
+ ql_queue_fw_error(qdev);
+ } else
+ /* Wake up the sleeping mpi_idc_work thread that is
+ * waiting for this event.
+ */
+ complete(&qdev->ide_completion);
+
+ return status;
+}
+
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
+ int status;
mbcp->out_count = 2;
- if (ql_get_mb_sts(qdev, mbcp))
- goto exit;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "%s: Could not get mailbox status.\n", __func__);
+ return;
+ }
qdev->link_status = mbcp->mbox_out[1];
QPRINTK(qdev, DRV, ERR, "Link Up.\n");
- QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
- if (!netif_carrier_ok(qdev->ndev)) {
- QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
- netif_carrier_on(qdev->ndev);
- netif_wake_queue(qdev->ndev);
+
+ /* If we're coming back from an IDC event
+ * then set up the CAM and frame routing.
+ */
+ if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+ status = ql_cam_route_initialize(qdev);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR,
+ "Failed to init CAM/Routing tables.\n");
+ return;
+ } else
+ clear_bit(QL_CAM_RT_SET, &qdev->flags);
}
-exit:
- /* Clear the MPI firmware status. */
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+ /* Queue up a worker to check the frame
+ * size information, and fix it if it's not
+ * to our liking.
+ */
+ if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+ QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+ set_bit(QL_PORT_CFG, &qdev->flags);
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker dpc.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 0);
+ }
+
+ netif_carrier_on(qdev->ndev);
}
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
+ int status;
+
mbcp->out_count = 3;
- if (ql_get_mb_sts(qdev, mbcp)) {
- QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
- goto exit;
- }
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+
+ netif_carrier_off(qdev->ndev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 5;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+ else
+ QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+
+ return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 1;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+ else
+ QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+
+ return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 6;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+ else {
+ int i;
+ QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+ for (i = 0; i < mbcp->out_count; i++)
+ QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
- if (netif_carrier_ok(qdev->ndev)) {
- QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
- netif_carrier_off(qdev->ndev);
- netif_stop_queue(qdev->ndev);
}
- QPRINTK(qdev, DRV, ERR, "Link Down.\n");
- QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
-exit:
- /* Clear the MPI firmware status. */
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+ return status;
}
static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
+ int status;
+
mbcp->out_count = 2;
- if (ql_get_mb_sts(qdev, mbcp)) {
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
- goto exit;
+ } else {
+ QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
+ status = ql_cam_route_initialize(qdev);
+ if (status)
+ QPRINTK(qdev, IFUP, ERR,
+ "Failed to init CAM/Routing tables.\n");
}
- QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
- QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
- mbcp->mbox_out[0]);
- QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
-exit:
- /* Clear the MPI firmware status. */
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ * This can get called iteratively from the mpi_work thread
+ * when events arrive via an interrupt.
+ * It also gets called when a mailbox command is polling for
+ * its completion. */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ int orig_count = mbcp->out_count;
+
+ /* Just get mailbox zero for now. */
+ mbcp->out_count = 1;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ goto end;
+ }
+
+ switch (mbcp->mbox_out[0]) {
+
+ /* This case is only active when we arrive here
+ * as a result of issuing a mailbox command to
+ * the firmware.
+ */
+ case MB_CMD_STS_INTRMDT:
+ case MB_CMD_STS_GOOD:
+ case MB_CMD_STS_INVLD_CMD:
+ case MB_CMD_STS_XFC_ERR:
+ case MB_CMD_STS_CSUM_ERR:
+ case MB_CMD_STS_ERR:
+ case MB_CMD_STS_PARAM_ERR:
+ /* We can only get mailbox status if we're polling from an
+ * unfinished command. Get the rest of the status data and
+ * return back to the caller.
+ * We only end up here when we're polling for a mailbox
+ * command completion.
+ */
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ return status;
+
+ /* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+	 * parameters, or loopback mode.
+ */
+ case AEN_IDC_REQ:
+ status = ql_idc_req_aen(qdev);
+ break;
+
+	/* Process an inbound IDC event.
+ * This will happen when we're trying to
+ * change tx/rx max frame size, change pause
+	 * parameters or loopback mode.
+ */
+ case AEN_IDC_CMPLT:
+ case AEN_IDC_EXT:
+ status = ql_idc_cmplt_aen(qdev);
+ break;
+
+ case AEN_LINK_UP:
+ ql_link_up(qdev, mbcp);
+ break;
+
+ case AEN_LINK_DOWN:
+ ql_link_down(qdev, mbcp);
+ break;
+
+ case AEN_FW_INIT_DONE:
+		/* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+ return status;
+ }
+ ql_init_fw_done(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_IN:
+ ql_sfp_in(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_OUT:
+ ql_sfp_out(qdev, mbcp);
+ break;
+
+ /* This event can arrive at boot time or after an
+ * MPI reset if the firmware failed to initialize.
+ */
+ case AEN_FW_INIT_FAIL:
+		/* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+ return status;
+ }
+ QPRINTK(qdev, DRV, ERR,
+ "Firmware initialization failed.\n");
+ status = -EIO;
+ ql_queue_fw_error(qdev);
+ break;
+
+ case AEN_SYS_ERR:
+ QPRINTK(qdev, DRV, ERR,
+ "System Error.\n");
+ ql_queue_fw_error(qdev);
+ status = -EIO;
+ break;
+
+ case AEN_AEN_LOST:
+ ql_aen_lost(qdev, mbcp);
+ break;
+
+ default:
+ QPRINTK(qdev, DRV, ERR,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+	}
+end:
+	/* Clear the MPI firmware status. */
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ return status;
}
-void ql_mpi_work(struct work_struct *work)
+/* Execute a single mailbox command.
+ * mbcp points to a struct mbox_params; each element of
+ * its mbox_in[] array contains the value for its
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status, count;
+
+ mutex_lock(&qdev->mpi_mutex);
+
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+ /* Load the mailbox registers and wake up MPI RISC. */
+ status = ql_exec_mb_cmd(qdev, mbcp);
+ if (status)
+ goto end;
+
+ /* If we're generating a system error, then there's nothing
+ * to wait for.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+ goto end;
+
+ /* Wait for the command to complete. We loop
+ * here because some AEN might arrive while
+ * we're waiting for the mailbox command to
+ * complete. If more than 5 arrive then we can
+ * assume something is wrong. */
+ count = 5;
+ do {
+ /* Wait for the interrupt to come in. */
+ status = ql_wait_mbx_cmd_cmplt(qdev);
+ if (status)
+ goto end;
+
+ /* Process the event. If it's an AEN, it
+ * will be handled in-line or a worker
+ * will be spawned. If it's our completion
+ * we will catch it below.
+ */
+ status = ql_mpi_handler(qdev, mbcp);
+ if (status)
+ goto end;
+
+ /* It's either the completion for our mailbox
+ * command complete or an AEN. If it's our
+ * completion then get out.
+ */
+ if (((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_GOOD) ||
+ ((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_INTRMDT))
+ break;
+ } while (--count);
+
+ if (!count) {
+ QPRINTK(qdev, DRV, ERR,
+ "Timed out waiting for mailbox complete.\n");
+ status = -ETIMEDOUT;
+ goto end;
+ }
+
+ /* Now we can clear the interrupt condition
+ * and look at our status.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+ if (((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_GOOD) &&
+ ((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_INTRMDT)) {
+ ql_display_mb_sts(qdev, mbcp);
+ status = -EIO;
+ }
+end:
+ mutex_unlock(&qdev->mpi_mutex);
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Get Firmware State.\n");
+ status = -EIO;
+ }
+
+ /* If bit zero is set in mbx 1 then the firmware is
+ * running, but not initialized. This should never
+ * happen.
+ */
+ if (mbcp->mbox_out[1] & 1) {
+ QPRINTK(qdev, DRV, ERR,
+ "Firmware waiting for initialization.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+int ql_mb_idc_ack(struct ql_adapter *qdev)
{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_work.work);
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 5;
mbcp->out_count = 1;
- while (ql_read32(qdev, STS) & STS_PI) {
- if (ql_get_mb_sts(qdev, mbcp)) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
- }
+ mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
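+	/* Echo back the request data saved from the IDC AEN. */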
+ mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+ mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+ mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+ mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
- switch (mbcp->mbox_out[0]) {
- case AEN_LINK_UP:
- ql_link_up(qdev, mbcp);
- break;
- case AEN_LINK_DOWN:
- ql_link_down(qdev, mbcp);
- break;
- case AEN_FW_INIT_DONE:
- ql_init_fw_done(qdev, mbcp);
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed IDC ACK send.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Set link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 3;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
+ mbcp->mbox_in[1] = qdev->link_config;
+ mbcp->mbox_in[2] = qdev->max_frame_size;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
+ QPRINTK(qdev, DRV, ERR,
+ "Port Config sent, wait for IDC.\n");
+ } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Set Port Configuration.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Get link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed Get Port Configuration.\n");
+ status = -EIO;
+ } else {
+ QPRINTK(qdev, DRV, DEBUG,
+ "Passed Get Port Configuration.\n");
+ qdev->link_config = mbcp->mbox_out[1];
+ qdev->max_frame_size = mbcp->mbox_out[2];
+ }
+ return status;
+}
+
+/* IDC - Inter Device Communication...
+ * Some firmware commands require consent of the adjacent FCOE
+ * function.  This function waits for the OK, or a
+ * counter-request for a little more time.
+ * The firmware will complete the request if the other
+ * function doesn't respond.
+ */
+static int ql_idc_wait(struct ql_adapter *qdev)
+{
+ int status = -ETIMEDOUT;
+ long wait_time = 1 * HZ;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ do {
+ /* Wait here for the command to complete
+ * via the IDC process.
+ */
+ wait_time =
+ wait_for_completion_timeout(&qdev->ide_completion,
+ wait_time);
+ if (!wait_time) {
+ QPRINTK(qdev, DRV, ERR,
+ "IDC Timeout.\n");
break;
- case MB_CMD_STS_GOOD:
+ }
+ /* Now examine the response from the IDC process.
+ * We might have a good completion or a request for
+ * more wait time.
+ */
+ if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
+ QPRINTK(qdev, DRV, ERR,
+ "IDC Time Extension from function.\n");
+ wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
+ } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
+ QPRINTK(qdev, DRV, ERR,
+ "IDC Success.\n");
+ status = 0;
break;
- case AEN_FW_INIT_FAIL:
- case AEN_SYS_ERR:
- case MB_CMD_STS_ERR:
- ql_queue_fw_error(qdev);
- default:
- /* Clear the MPI firmware status. */
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ } else {
+ QPRINTK(qdev, DRV, ERR,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
+ status = -EIO;
break;
}
+ } while (wait_time);
+
+ return status;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_mb_set_port_cfg(qdev);
+ if (status)
+ return status;
+ status = ql_idc_wait(qdev);
+ return status;
+}
+
+/* The following routines are worker-thread handlers that
+ * process events and may sleep waiting for completion.
+ */
+
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+ struct net_device *ndev = qdev->ndev;
+ int status;
+
+ status = ql_mb_get_port_cfg(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Bug: Failed to get port config data.\n");
+ goto err;
+ }
+
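+	/* No change is needed if the MTU doesn't require jumbo
+	 * frames, or if jumbo frames are already configured.
+	 */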
+ if (ndev->mtu <= 2500)
+ goto end;
+ else if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+ qdev->max_frame_size ==
+ CFG_DEFAULT_MAX_FRAME_SIZE)
+ goto end;
+
+ qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+ qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+ status = ql_set_port_cfg(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Bug: Failed to set port config data.\n");
+ goto err;
+ }
+end:
+ clear_bit(QL_PORT_CFG, &qdev->flags);
+ return;
+err:
+ ql_queue_fw_error(qdev);
+ goto end;
+}
+
+/* Process an inter-device request. This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port. We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_idc_work.work);
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ u32 aen;
+
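+	/* The originating mailbox command is carried in the
+	 * upper 16 bits of mbox_out[1].
+	 */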
+ aen = mbcp->mbox_out[1] >> 16;
+
+ switch (aen) {
+ default:
+ QPRINTK(qdev, DRV, ERR,
+ "Bug: Unhandled IDC action.\n");
+ break;
+ case MB_CMD_PORT_RESET:
+ case MB_CMD_SET_PORT_CFG:
+ case MB_CMD_STOP_FW:
+ netif_carrier_off(qdev->ndev);
+ /* Signal the resulting link up AEN
+ * that the frame routing and mac addr
+		 * need to be set.
+		 */
+ set_bit(QL_CAM_RT_SET, &qdev->flags);
+ status = ql_mb_idc_ack(qdev);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Bug: No pending IDC!\n");
+ }
+ }
+}
+
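+/* Worker that drains pending MPI events while the PI status
+ * bit is set, then re-enables the completion interrupt.
+ */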
+void ql_mpi_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_work.work);
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ mutex_lock(&qdev->mpi_mutex);
+
+ while (ql_read32(qdev, STS) & STS_PI) {
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ mbcp->out_count = 1;
+ ql_mpi_handler(qdev, mbcp);
}
+
+ mutex_unlock(&qdev->mpi_mutex);
ql_enable_completion_interrupt(qdev, 0);
}
@@ -142,9 +848,8 @@ void ql_mpi_reset_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_reset_work.work);
- QPRINTK(qdev, DRV, ERR,
- "Enter, qdev = %p..\n", qdev);
- ql_write32(qdev, CSR, CSR_CMD_SET_RST);
- msleep(50);
- ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
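+	/* Cancel any outstanding MPI workers before resetting the RISC. */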
+ cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ ql_soft_reset_mpi_risc(qdev);
}