Diffstat (limited to 'drivers/net/cxgb3')
 drivers/net/cxgb3/adapter.h       |  11
 drivers/net/cxgb3/common.h        |  27
 drivers/net/cxgb3/cxgb3_defs.h    |   6
 drivers/net/cxgb3/cxgb3_ioctl.h   |  33
 drivers/net/cxgb3/cxgb3_main.c    | 190
 drivers/net/cxgb3/cxgb3_offload.c |  96
 drivers/net/cxgb3/cxgb3_offload.h |   1
 drivers/net/cxgb3/l2t.c           |   1
 drivers/net/cxgb3/l2t.h           |   1
 drivers/net/cxgb3/mc5.c           |   3
 drivers/net/cxgb3/regs.h          |  32
 drivers/net/cxgb3/sge.c           | 425
 drivers/net/cxgb3/t3_hw.c         |  63
 drivers/net/cxgb3/t3cdev.h        |   1
 drivers/net/cxgb3/version.h       |   7
 drivers/net/cxgb3/xgmac.c         | 191
 16 files changed, 806 insertions(+), 282 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5c97a64451c..80c3d8f268a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -74,6 +74,11 @@ enum { /* adapter flags */
struct rx_desc;
struct rx_sw_desc;
+struct sge_fl_page {
+ struct skb_frag_struct frag;
+ unsigned char *va;
+};
+
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
@@ -81,11 +86,13 @@ struct sge_fl { /* SGE per free-buffer list state */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* free list generation */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ struct sge_fl_page page;
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
- unsigned int cntxt_id; /* SGE context id for the free list */
unsigned long empty; /* # of times queue ran out of buffers */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
};
/*
@@ -121,6 +128,8 @@ struct sge_rspq { /* state for an SGE response queue */
unsigned long empty; /* # of times queue ran out of credits */
unsigned long nomem; /* # of responses deferred due to no mem */
unsigned long unhandled_irqs; /* # of spurious intrs */
+ unsigned long starved;
+ unsigned long restarted;
};
struct tx_desc;
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index e23deeb7d06..8d137963369 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -112,8 +112,7 @@ enum {
};
enum {
- SUPPORTED_OFFLOAD = 1 << 24,
- SUPPORTED_IRQ = 1 << 25
+ SUPPORTED_IRQ = 1 << 24
};
enum { /* adapter interrupt-maintained statistics */
@@ -260,6 +259,10 @@ struct mac_stats {
unsigned long serdes_signal_loss;
unsigned long xaui_pcs_ctc_err;
unsigned long xaui_pcs_align_change;
+
+ unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
+ unsigned long num_resets; /* # times reset due to stuck TX */
+
};
struct tp_mib_stats {
@@ -354,6 +357,9 @@ enum {
MC5_MODE_72_BIT = 2
};
+/* MC5 min active region size */
+enum { MC5_MIN_TIDS = 16 };
+
struct vpd_params {
unsigned int cclk;
unsigned int mclk;
@@ -398,6 +404,13 @@ struct adapter_params {
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
+ unsigned int offload;
+};
+
+enum { /* chip revisions */
+ T3_REV_A = 0,
+ T3_REV_B = 2,
+ T3_REV_B2 = 3,
};
struct trace_params {
@@ -465,6 +478,13 @@ struct cmac {
struct adapter *adapter;
unsigned int offset;
unsigned int nucast; /* # of address filters for unicast MACs */
+ unsigned int tx_tcnt;
+ unsigned int tx_xcnt;
+ u64 tx_mcnt;
+ unsigned int rx_xcnt;
+ u64 rx_mcnt;
+ unsigned int toggle_cnt;
+ unsigned int txen;
struct mac_stats stats;
};
@@ -588,7 +608,7 @@ static inline int is_10G(const struct adapter *adap)
static inline int is_offload(const struct adapter *adap)
{
- return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
+ return adap->params.offload;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
@@ -666,6 +686,7 @@ int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3b2_mac_watchdog_task(struct cmac *mac);
void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index 16e004990c5..483a594210a 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
- * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -68,7 +67,10 @@ static inline union listen_entry *stid2entry(const struct tid_info *t,
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
- return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
+ struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+ &(t->tid_tab[tid]) : NULL;
+
+ return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
}
/*
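With this change lookup_tid() returns NULL both for out-of-range tids and for slots whose client has been cleared on the release path (see cxgb3_offload.c below), so every dispatch site must check the result. A minimal sketch of the expected calling convention, mirroring the do_hwtid_rpl() pattern later in this patch; the function name here is illustrative only:

static int dispatch_cpl(struct t3cdev *dev, struct sk_buff *skb,
			unsigned int hwtid, unsigned int opcode)
{
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode])
		return t3c_tid->client->handlers[opcode](dev, skb,
							 t3c_tid->ctx);
	return CPL_RET_BUF_DONE;	/* stale or unclaimed tid */
}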
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index a94281861a6..0a82fcddf2d 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -36,28 +36,17 @@
* Ioctl commands specific to this driver.
*/
enum {
- CHELSIO_SETREG = 1024,
- CHELSIO_GETREG,
- CHELSIO_SETTPI,
- CHELSIO_GETTPI,
- CHELSIO_GETMTUTAB,
- CHELSIO_SETMTUTAB,
- CHELSIO_GETMTU,
- CHELSIO_SET_PM,
- CHELSIO_GET_PM,
- CHELSIO_GET_TCAM,
- CHELSIO_SET_TCAM,
- CHELSIO_GET_TCB,
- CHELSIO_GET_MEM,
- CHELSIO_LOAD_FW,
- CHELSIO_GET_PROTO,
- CHELSIO_SET_PROTO,
- CHELSIO_SET_TRACE_FILTER,
- CHELSIO_SET_QSET_PARAMS,
- CHELSIO_GET_QSET_PARAMS,
- CHELSIO_SET_QSET_NUM,
- CHELSIO_GET_QSET_NUM,
- CHELSIO_SET_PKTSCHED,
+ CHELSIO_GETMTUTAB = 1029,
+ CHELSIO_SETMTUTAB = 1030,
+ CHELSIO_SET_PM = 1032,
+ CHELSIO_GET_PM = 1033,
+ CHELSIO_GET_MEM = 1038,
+ CHELSIO_LOAD_FW = 1041,
+ CHELSIO_SET_TRACE_FILTER = 1044,
+ CHELSIO_SET_QSET_PARAMS = 1045,
+ CHELSIO_GET_QSET_PARAMS = 1046,
+ CHELSIO_SET_QSET_NUM = 1047,
+ CHELSIO_GET_QSET_NUM = 1048,
};
struct ch_reg {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 43583ed655a..67b4b219d92 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -42,6 +42,7 @@
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -184,16 +185,24 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
int speed, int duplex, int pause)
{
struct net_device *dev = adapter->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
+ struct cmac *mac = &pi->mac;
/* Skip changes from disabled ports. */
if (!netif_running(dev))
return;
if (link_stat != netif_carrier_ok(dev)) {
- if (link_stat)
+ if (link_stat) {
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
netif_carrier_on(dev);
- else
+ } else {
netif_carrier_off(dev);
+ pi->phy.ops->power_down(&pi->phy, 1);
+ t3_mac_disable(mac, MAC_DIRECTION_RX);
+ t3_link_start(&pi->phy, mac, &pi->link_config);
+ }
+
link_report(dev);
}
}
@@ -406,7 +415,7 @@ static void quiesce_rx(struct adapter *adap)
static int setup_sge_qsets(struct adapter *adap)
{
int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
- unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
+ unsigned int ntxq = SGE_TXQ_PER_SET;
if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
irq_idx = -1;
@@ -434,27 +443,25 @@ static int setup_sge_qsets(struct adapter *adap)
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
char *buf,
- ssize_t(*format) (struct adapter *, char *))
+ ssize_t(*format) (struct net_device *, char *))
{
ssize_t len;
- struct adapter *adap = to_net_dev(d)->priv;
/* Synchronize with ioctls that may shut down the device */
rtnl_lock();
- len = (*format) (adap, buf);
+ len = (*format) (to_net_dev(d), buf);
rtnl_unlock();
return len;
}
static ssize_t attr_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len,
- ssize_t(*set) (struct adapter *, unsigned int),
+ ssize_t(*set) (struct net_device *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
char *endp;
ssize_t ret;
unsigned int val;
- struct adapter *adap = to_net_dev(d)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -464,7 +471,7 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
return -EINVAL;
rtnl_lock();
- ret = (*set) (adap, val);
+ ret = (*set) (to_net_dev(d), val);
if (!ret)
ret = len;
rtnl_unlock();
@@ -472,8 +479,9 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
}
#define CXGB3_SHOW(name, val_expr) \
-static ssize_t format_##name(struct adapter *adap, char *buf) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
+ struct adapter *adap = dev->priv; \
return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
@@ -482,13 +490,17 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
return attr_show(d, attr, buf, format_##name); \
}
-static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
+ struct adapter *adap = dev->priv;
+ int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
+
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
if (val && adap->params.rev == 0)
return -EINVAL;
- if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ min_tids)
return -EINVAL;
adap->params.mc5.nfilters = val;
return 0;
@@ -500,11 +512,14 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
-static ssize_t set_nservers(struct adapter *adap, unsigned int val)
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
+ struct adapter *adap = dev->priv;
+
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
- if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
+ MC5_MIN_TIDS)
return -EINVAL;
adap->params.mc5.nservers = val;
return 0;
@@ -704,6 +719,28 @@ static void bind_qsets(struct adapter *adap)
}
}
+#define FW_FNAME "t3fw-%d.%d.%d.bin"
+
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ char buf[64];
+ const struct firmware *fw;
+ struct device *dev = &adap->pdev->dev;
+
+ snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
+ FW_VERSION_MINOR, FW_VERSION_MICRO);
+ ret = request_firmware(&fw, buf, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not upgrade firmware: unable to load %s\n",
+ buf);
+ return ret;
+ }
+ ret = t3_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+ return ret;
+}
+
/**
* cxgb_up - enable the adapter
* @adapter: adapter being enabled
@@ -720,6 +757,8 @@ static int cxgb_up(struct adapter *adap)
if (!(adap->flags & FULL_INIT_DONE)) {
err = t3_check_fw_version(adap);
+ if (err == -EINVAL)
+ err = upgrade_fw(adap);
if (err)
goto out;
@@ -731,6 +770,8 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto out;
+ t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
err = setup_sge_qsets(adap);
if (err)
goto out;
@@ -891,7 +932,7 @@ static int cxgb_open(struct net_device *dev)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- if (!ofld_disable) {
+ if (is_offload(adapter) && !ofld_disable) {
err = offload_open(dev);
if (err)
printk(KERN_WARNING
@@ -1028,7 +1069,11 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"TxCsumOffload ",
"RxCsumGood ",
- "RxDrops "
+ "RxDrops ",
+
+ "CheckTXEnToggled ",
+ "CheckResets ",
+
};
static int get_stats_count(struct net_device *dev)
@@ -1142,6 +1187,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
*data++ = s->rx_cong_drops;
+
+ *data++ = s->num_toggled;
+ *data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
@@ -1359,23 +1407,27 @@ static int set_rx_csum(struct net_device *dev, u32 data)
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
- struct adapter *adapter = dev->priv;
+ const struct adapter *adapter = dev->priv;
+ const struct port_info *pi = netdev_priv(dev);
+ const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
e->rx_max_pending = MAX_RX_BUFFERS;
e->rx_mini_max_pending = 0;
e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
e->tx_max_pending = MAX_TXQ_ENTRIES;
- e->rx_pending = adapter->params.sge.qset[0].fl_size;
- e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
- e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
- e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
+ e->rx_pending = q->fl_size;
+ e->rx_mini_pending = q->rspq_size;
+ e->rx_jumbo_pending = q->jumbo_size;
+ e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
int i;
+ struct qset_params *q;
struct adapter *adapter = dev->priv;
+ const struct port_info *pi = netdev_priv(dev);
if (e->rx_pending > MAX_RX_BUFFERS ||
e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
@@ -1390,9 +1442,8 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
if (adapter->flags & FULL_INIT_DONE)
return -EBUSY;
- for (i = 0; i < SGE_QSETS; ++i) {
- struct qset_params *q = &adapter->params.sge.qset[i];
-
+ q = &adapter->params.sge.qset[pi->first_qset];
+ for (i = 0; i < pi->nqsets; ++i, ++q) {
q->rspq_size = e->rx_mini_pending;
q->fl_size = e->rx_pending;
q->jumbo_size = e->rx_jumbo_pending;
@@ -1549,32 +1600,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EFAULT;
switch (cmd) {
- case CHELSIO_SETREG:{
- struct ch_reg edata;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if ((edata.addr & 3) != 0
- || edata.addr >= adapter->mmio_len)
- return -EINVAL;
- writel(edata.val, adapter->regs + edata.addr);
- break;
- }
- case CHELSIO_GETREG:{
- struct ch_reg edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if ((edata.addr & 3) != 0
- || edata.addr >= adapter->mmio_len)
- return -EINVAL;
- edata.val = readl(adapter->regs + edata.addr);
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
- }
case CHELSIO_SET_QSET_PARAMS:{
int i;
struct qset_params *q;
@@ -1838,10 +1863,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EINVAL;
/*
- * Version scheme:
- * bits 0..9: chip version
- * bits 10..15: chip revision
- */
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ */
t.version = 3 | (adapter->params.rev << 10);
if (copy_to_user(useraddr, &t, sizeof(t)))
return -EFAULT;
@@ -1890,20 +1915,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
t.trace_rx);
break;
}
- case CHELSIO_SET_PKTSCHED:{
- struct ch_pktsched_params p;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (!adapter->open_device_map)
- return -EAGAIN; /* uP and SGE must be running */
- if (copy_from_user(&p, useraddr, sizeof(p)))
- return -EFAULT;
- send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
- p.binding);
- break;
-
- }
default:
return -EOPNOTSUPP;
}
@@ -2104,6 +2115,42 @@ static void check_link_status(struct adapter *adapter)
}
}
+static void check_t3b2_mac(struct adapter *adapter)
+{
+ int i;
+
+ if (!rtnl_trylock()) /* synchronize with ifdown */
+ return;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+ int status;
+
+ if (!netif_running(dev))
+ continue;
+
+ status = 0;
+ if (netif_running(dev) && netif_carrier_ok(dev))
+ status = t3b2_mac_watchdog_task(&p->mac);
+ if (status == 1)
+ p->mac.stats.num_toggled++;
+ else if (status == 2) {
+ struct cmac *mac = &p->mac;
+
+ t3_mac_set_mtu(mac, dev->mtu);
+ t3_mac_set_address(mac, 0, dev->dev_addr);
+ cxgb_set_rxmode(dev);
+ t3_link_start(&p->phy, mac, &p->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+ t3_port_intr_enable(adapter, p->port_id);
+ p->mac.stats.num_resets++;
+ }
+ }
+ rtnl_unlock();
+}
+
+
static void t3_adap_check_task(struct work_struct *work)
{
struct adapter *adapter = container_of(work, struct adapter,
@@ -2124,6 +2171,9 @@ static void t3_adap_check_task(struct work_struct *work)
adapter->check_task_cnt = 0;
}
+ if (p->rev == T3_REV_B2)
+ check_t3b2_mac(adapter);
+
/* Schedule the next check update if any port is active. */
spin_lock(&adapter->work_lock);
if (adapter->open_device_map & PORT_MASK)
@@ -2232,9 +2282,9 @@ static void __devinit print_port_info(struct adapter *adap,
if (!test_bit(i, &adap->registered_device_map))
continue;
- printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
+ printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
dev->name, ai->desc, pi->port_type->desc,
- adap->params.rev, buf,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
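The ioctl hunk above documents the t.version packing: bits 0..9 carry the chip version (3 for T3) and bits 10..15 the chip revision, per t.version = 3 | (adapter->params.rev << 10). A sketch of how a consumer could unpack that word; these helpers are illustrative, not part of the patch:

static inline unsigned int t3_version_chip(u32 version)
{
	return version & 0x3ff;		/* bits 0..9: chip version, 3 = T3 */
}

static inline unsigned int t3_version_rev(u32 version)
{
	return (version >> 10) & 0x3f;	/* bits 10..15: chip revision */
}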
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c6b72664318..ebcf35e4cf5 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
- * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -161,14 +160,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
int i;
for_each_port(adapter, i) {
- const struct vlan_group *grp;
+ struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
- dev = grp ? grp->vlan_devices[vlan] : NULL;
+ dev = NULL;
+ if (grp)
+ dev = vlan_group_get_device(grp, vlan);
} else
while (dev->master)
dev = dev->master;
@@ -507,6 +508,7 @@ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
td->tid_release_list = p;
if (!p->ctx)
schedule_work(&td->tid_release_task);
@@ -552,7 +554,9 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
spin_lock_bh(&t->atid_lock);
- if (t->afree) {
+ if (t->afree &&
+ t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
+ t->ntids) {
union active_open_entry *p = t->afree;
atid = (p - t->atid_tab) + t->atid_base;
@@ -620,7 +624,8 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
t3c_tid->
@@ -639,7 +644,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
@@ -657,7 +662,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
@@ -686,6 +691,28 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
}
}
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ int gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
union opcode_tid *p = cplhdr(skb);
@@ -693,30 +720,39 @@ static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[p->opcode]) {
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
struct cpl_abort_req_rss *req = cplhdr(skb);
struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
- struct sk_buff *skb =
- alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
- if (!skb) {
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+ sizeof(struct
+ cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
printk("do_abort_req_rss: couldn't get skb!\n");
goto out;
}
- skb->priority = CPL_PRIORITY_DATA;
- __skb_put(skb, sizeof(struct cpl_abort_rpl));
- rpl = cplhdr(skb);
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
rpl->wr.wr_hi =
htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
- OPCODE_TID(rpl) =
- htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
- rpl->cmd = req->status;
- cxgb3_ofld_send(dev, skb);
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
out:
return CPL_RET_BUF_DONE;
}
@@ -729,7 +765,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
@@ -740,17 +776,6 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
}
}
-static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected SET_TCB_RPL status %u for tid %u\n",
- rpl->status, GET_TID(rpl));
- return CPL_RET_BUF_DONE;
-}
-
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_trace_pkt *p = cplhdr(skb);
@@ -758,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
skb->protocol = htons(0xffff);
skb->dev = dev->lldev;
skb_pull(skb, sizeof(*p));
- skb->mac.raw = skb->data;
+ skb_reset_mac_header(skb);
netif_receive_skb(skb);
return 0;
}
@@ -770,7 +795,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[opcode]) {
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
@@ -969,7 +994,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
for (tid = 0; tid < ti->ntids; tid++) {
te = lookup_tid(ti, tid);
BUG_ON(!te);
- if (te->ctx && te->client && te->client->redirect) {
+ if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
l2t_hold(L2DATA(tdev), e);
@@ -1212,7 +1237,8 @@ void __init cxgb3_offload_init(void)
t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
- t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
+ t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
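In cxgb3_queue_tid_release() above, pending entries are chained through their ctx pointers, and clearing client marks the slot dead for the new NULL-returning lookup_tid(). The push amounts to an intrusive LIFO; a standalone sketch with an illustrative name:

static void tid_release_push(struct t3c_tid_entry **head,
			     struct t3c_tid_entry *p)
{
	p->ctx = (void *)*head;		/* ctx doubles as the next pointer */
	p->client = NULL;		/* lookup_tid() now skips this slot */
	*head = p;			/* new list head */
}

The deferred work task later drains the chain from td->tid_release_list by following each entry's ctx link.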
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 0e6beb69ba1..f15446a32ef 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
- * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 3c0cb855705..d660af74606 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
- * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index ba5d2cbd724..d79001336cf 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
- * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index 644d62ea86a..84c1ffa8e2d 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -328,6 +328,9 @@ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int tcam_size = mc5->tcam_size;
struct adapter *adap = mc5->adapter;
+ if (!tcam_size)
+ return 0;
+
if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
return -EINVAL;
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index b56c5f52bcd..e5a553410e2 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1206,6 +1206,14 @@
#define A_TP_RX_TRC_KEY0 0x120
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
+ M_TXDROPCNTCH0RCVD)
+
#define A_ULPRX_CTL 0x500
#define S_ROUND_ROBIN 4
@@ -1226,9 +1234,15 @@
#define A_ULPRX_ISCSI_TAGMASK 0x514
+#define S_HPZ0 0
+#define M_HPZ0 0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
#define A_ULPRX_TDDP_LLIMIT 0x51c
#define A_ULPRX_TDDP_ULIMIT 0x520
+#define A_ULPRX_TDDP_PSZ 0x528
#define A_ULPRX_STAG_LLIMIT 0x52c
@@ -1834,6 +1848,8 @@
#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
#define F_TXPAUSEEN V_TXPAUSEEN(1U)
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
#define A_XGM_RX_CTRL 0x80c
#define S_RXEN 0
@@ -1920,11 +1936,20 @@
#define A_XGM_TXFIFO_CFG 0x888
+#define S_TXIPG 13
+#define M_TXIPG 0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
#define S_TXFIFOTHRESH 4
#define M_TXFIFOTHRESH 0x1ff
#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
#define A_XGM_SERDES_CTRL 0x890
#define A_XGM_SERDES_CTRL0 0x8e0
@@ -2190,6 +2215,13 @@
#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
#define XGMAC0_1_BASE_ADDR 0xa00
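The new register definitions follow the file's S_/M_/V_/G_ quartet: S_FOO is the field's shift, M_FOO its width mask, V_FOO(x) positions a value, and G_FOO(v) extracts it. A sketch of a read-modify-write on the 4-bit HPZ0 field defined above:

static u32 set_hpz0(u32 reg, u32 pgshift)
{
	reg &= ~V_HPZ0(M_HPZ0);			/* clear the 4-bit field */
	reg |= V_HPZ0(pgshift & M_HPZ0);	/* insert the new value */
	return reg;			/* G_HPZ0(reg) now yields pgshift */
}

This matches the cxgb3_main.c hunk above, which writes V_HPZ0(PAGE_SHIFT - 12) to A_ULPRX_TDDP_PSZ.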
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f2cf8a07c6..3666586a483 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -45,9 +45,25 @@
#define USE_GTS 0
#define SGE_RX_SM_BUF_SIZE 1536
+
+/*
+ * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
+ * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
+ * be a multiple of the host page size).
+ */
+#define USE_RX_PAGE
+#define RX_PAGE_SIZE 2048
+
+/*
+ * skb freelist packets are copied into a new skb (and the freelist one is
+ * reused) if their length is at most SGE_RX_COPY_THRES (defined below).
+ */
#define SGE_RX_COPY_THRES 256
-# define SGE_RX_DROP_THRES 16
+/*
+ * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ */
+#define SGE_RX_DROP_THRES 16
/*
* Period of the Tx buffer reclaim timer. This timer does not need to run
@@ -85,7 +101,10 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
};
struct rx_sw_desc { /* SW state per Rx descriptor */
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct sge_fl_page page;
+ } t;
DECLARE_PCI_UNMAP_ADDR(dma_addr);
};
@@ -105,6 +124,15 @@ struct unmap_info { /* packet unmapping info, overlays skb->cb */
};
/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/*
* Maps a number of flits to the number of Tx descriptors that can hold them.
* The formula is
*
@@ -252,10 +280,13 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
struct pci_dev *pdev = adapter->pdev;
unsigned int cidx = q->cidx;
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (need_skb_unmap())
+ if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->skb->priority == cidx)
kfree_skb(d->skb);
@@ -320,16 +351,27 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
- kfree_skb(d->skb);
- d->skb = NULL;
+
+ if (q->buf_size != RX_PAGE_SIZE) {
+ kfree_skb(d->t.skb);
+ d->t.skb = NULL;
+ } else {
+ if (d->t.page.frag.page)
+ put_page(d->t.page.frag.page);
+ d->t.page.frag.page = NULL;
+ }
if (++cidx == q->size)
cidx = 0;
}
+
+ if (q->page.frag.page)
+ put_page(q->page.frag.page);
+ q->page.frag.page = NULL;
}
/**
* add_one_rx_buf - add a packet buffer to a free-buffer list
- * @skb: the buffer to add
+ * @va: va of the buffer to add
* @len: the buffer length
* @d: the HW Rx descriptor to write
* @sd: the SW Rx descriptor to write
@@ -339,14 +381,13 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
*/
-static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
+static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
unsigned int gen, struct pci_dev *pdev)
{
dma_addr_t mapping;
- sd->skb = skb;
- mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
@@ -371,14 +412,47 @@ static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
+ struct sge_fl_page *p = &q->page;
while (n--) {
- struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ unsigned char *va;
- if (!skb)
- break;
+ if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+
+ if (!skb) {
+ q->alloc_failed++;
+ break;
+ }
+ va = skb->data;
+ sd->t.skb = skb;
+ } else {
+ if (!p->frag.page) {
+ p->frag.page = alloc_pages(gfp, 0);
+ if (unlikely(!p->frag.page)) {
+ q->alloc_failed++;
+ break;
+ } else {
+ p->frag.size = RX_PAGE_SIZE;
+ p->frag.page_offset = 0;
+ p->va = page_address(p->frag.page);
+ }
+ }
+
+ memcpy(&sd->t, p, sizeof(*p));
+ va = p->va;
+
+ p->frag.page_offset += RX_PAGE_SIZE;
+ BUG_ON(p->frag.page_offset > PAGE_SIZE);
+ p->va += RX_PAGE_SIZE;
+ if (p->frag.page_offset == PAGE_SIZE)
+ p->frag.page = NULL;
+ else
+ get_page(p->frag.page);
+ }
+
- add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
+ add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
d++;
sd++;
if (++q->pidx == q->size) {
@@ -413,7 +487,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
struct rx_desc *from = &q->desc[idx];
struct rx_desc *to = &q->desc[q->pidx];
- q->sdesc[q->pidx] = q->sdesc[idx];
+ memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */
wmb();
@@ -446,7 +520,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
* of the SW ring.
*/
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
- size_t sw_size, dma_addr_t *phys, void *metadata)
+ size_t sw_size, dma_addr_t * phys, void *metadata)
{
size_t len = nelem * elem_size;
void *s = NULL;
@@ -576,61 +650,6 @@ static inline unsigned int flits_to_desc(unsigned int n)
}
/**
- * get_packet - return the next ingress packet buffer from a free list
- * @adap: the adapter that received the packet
- * @fl: the SGE free list holding the packet
- * @len: the packet length including any SGE padding
- * @drop_thres: # of remaining buffers before we start dropping packets
- *
- * Get the next packet from a free list and complete setup of the
- * sk_buff. If the packet is small we make a copy and recycle the
- * original buffer, otherwise we use the original buffer itself. If a
- * positive drop threshold is supplied packets are dropped and their
- * buffers recycled if (a) the number of remaining buffers is under the
- * threshold and the packet is too big to copy, or (b) the packet should
- * be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
- unsigned int len, unsigned int drop_thres)
-{
- struct sk_buff *skb = NULL;
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
- prefetch(sd->skb->data);
-
- if (len <= SGE_RX_COPY_THRES) {
- skb = alloc_skb(len, GFP_ATOMIC);
- if (likely(skb != NULL)) {
- __skb_put(skb, len);
- pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd,
- dma_addr),
- len, PCI_DMA_FROMDEVICE);
- memcpy(skb->data, sd->skb->data, len);
- pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd,
- dma_addr),
- len, PCI_DMA_FROMDEVICE);
- } else if (!drop_thres)
- goto use_orig_buf;
- recycle:
- recycle_rx_buf(adap, fl, fl->cidx);
- return skb;
- }
-
- if (unlikely(fl->credits < drop_thres))
- goto recycle;
-
- use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
- skb = sd->skb;
- skb_put(skb, len);
- __refill_fl(adap, fl);
- return skb;
-}
-
-/**
* get_imm_packet - return the next ingress packet buffer from a response
* @resp: the response descriptor containing the packet data
*
@@ -642,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
+ skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
}
return skb;
}
@@ -878,11 +897,11 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
d->flit[2] = 0;
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
hdr->cntrl = htonl(cntrl);
- eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
CPL_ETH_II : CPL_ETH_II_VLAN;
tso_info |= V_LSO_ETH_TYPE(eth_type) |
- V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
- V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+ V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
hdr->lso_info = htonl(tso_info);
flits = 3;
} else {
@@ -894,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
if (skb->len <= WR_LEN - sizeof(*cpl)) {
q->sdesc[pidx].skb = NULL;
if (!skb->data_len)
- memcpy(&d->flit[2], skb->data, skb->len);
+ skb_copy_from_linear_data(skb, &d->flit[2],
+ skb->len);
else
skb_copy_bits(skb, 0, &d->flit[2], skb->len);
@@ -1227,6 +1247,50 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
}
/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+ const struct unmap_info *ui = (struct unmap_info *)skb->cb;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (ui->len)
+ pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
* write_ofld_wr - write an offload work request
* @adap: the adapter
* @skb: the packet to send
@@ -1256,14 +1320,20 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
/* Only TX_DATA builds SGLs */
from = (struct work_request_hdr *)skb->data;
- memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
+ memcpy(&d->flit[1], &from[1],
+ skb_transport_offset(skb) - sizeof(*from));
- flits = (skb->h.raw - skb->data) / 8;
+ flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
adap->pdev);
- if (need_skb_unmap())
- ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
+ ((struct unmap_info *)skb->cb)->len = (skb->tail -
+ skb->transport_header);
+ }
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
gen, from->wr_hi, from->wr_lo);
@@ -1283,8 +1353,8 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
if (skb->len <= WR_LEN && cnt == 0)
return 1; /* packet fits as immediate data */
- flits = (skb->h.raw - skb->data) / 8; /* headers */
- if (skb->tail != skb->h.raw)
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ if (skb->tail != skb->transport_header)
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
}
@@ -1554,7 +1624,9 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
unsigned int gather_idx)
{
rq->offload_pkts++;
- skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
if (rq->polling) {
rx_gather[gather_idx++] = skb;
@@ -1617,11 +1689,9 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
struct port_info *pi;
- rq->eth_pkts++;
skb_pull(skb, sizeof(*p) + pad);
- skb->dev = adap->port[p->iff];
skb->dev->last_rx = jiffies;
- skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
!p->fragment) {
@@ -1645,6 +1715,85 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
netif_rx(skb);
}
+#define SKB_DATA_SIZE 128
+
+static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
+ unsigned int len)
+{
+ skb->len = len;
+ if (len <= SKB_DATA_SIZE) {
+ skb_copy_to_linear_data(skb, p->va, len);
+ skb->tail += len;
+ put_page(p->frag.page);
+ } else {
+ skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
+ skb_shinfo(skb)->frags[0].page = p->frag.page;
+ skb_shinfo(skb)->frags[0].page_offset =
+ p->frag.page_offset + SKB_DATA_SIZE;
+ skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
+ skb_shinfo(skb)->nr_frags = 1;
+ skb->data_len = len - SKB_DATA_SIZE;
+ skb->tail += SKB_DATA_SIZE;
+ skb->truesize += skb->data_len;
+ }
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->t.skb->data);
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ struct rx_desc *d = &fl->desc[fl->cidx];
+ dma_addr_t mapping =
+ (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
+ be32_to_cpu(d->addr_lo));
+
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(sd->t.skb, skb->data, len);
+ pci_dma_sync_single_for_device(adap->pdev, mapping, len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres))
+ goto recycle;
+
+use_orig_buf:
+ pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb = sd->t.skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
/**
* handle_rsp_cntrl_info - handles control information in a response
* @qs: the queue set corresponding to the response
@@ -1767,7 +1916,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
q->next_holdoff = q->holdoff_tmr;
while (likely(budget_left && is_new_response(r, q))) {
- int eth, ethpad = 0;
+ int eth, ethpad = 2;
struct sk_buff *skb = NULL;
u32 len, flags = ntohl(r->flags);
u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
@@ -1794,18 +1943,56 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
break;
}
q->imm_data++;
+ ethpad = 0;
} else if ((len = ntohl(r->len_cq)) != 0) {
- struct sge_fl *fl;
+ struct sge_fl *fl =
+ (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+
+ if (fl->buf_size == RX_PAGE_SIZE) {
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct sge_fl_page *p = &sd->t.page;
+
+ prefetch(p->va);
+ prefetch(p->va + L1_CACHE_BYTES);
+
+ __refill_fl(adap, fl);
+
+ pci_unmap_single(adap->pdev,
+ pci_unmap_addr(sd, dma_addr),
+ fl->buf_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (eth) {
+ if (unlikely(fl->credits <
+ SGE_RX_DROP_THRES))
+ goto eth_recycle;
+
+ skb = alloc_skb(SKB_DATA_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+eth_recycle:
+ q->rx_drops++;
+ recycle_rx_buf(adap, fl,
+ fl->cidx);
+ goto eth_done;
+ }
+ } else {
+ skb = alloc_skb(SKB_DATA_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto no_mem;
+ }
+
+ skb_data_init(skb, p, G_RSPD_LEN(len));
+eth_done:
+ fl->credits--;
+ q->eth_pkts++;
+ } else {
+ fl->credits--;
+ skb = get_packet(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ }
- fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
- fl->credits--;
- skb = get_packet(adap, fl, G_RSPD_LEN(len),
- eth ? SGE_RX_DROP_THRES : 0);
- if (!skb)
- q->rx_drops++;
- else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
- __skb_pull(skb, 2);
- ethpad = 2;
if (++fl->cidx == fl->size)
fl->cidx = 0;
} else
@@ -1829,18 +2016,23 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
q->credits = 0;
}
- if (likely(skb != NULL)) {
+ if (skb) {
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
- /* Preserve the RSS info in csum & priority */
- skb->csum = rss_hi;
- skb->priority = rss_lo;
- ngathered = rx_offload(&adap->tdev, q, skb,
- offload_skbs, ngathered);
+ if (unlikely(r->rss_hdr.opcode ==
+ CPL_TRACE_PKT))
+ __skb_pull(skb, ethpad);
+
+ ngathered = rx_offload(&adap->tdev, q,
+ skb, offload_skbs,
+ ngathered);
}
}
-
--budget_left;
}
@@ -2320,10 +2512,23 @@ static void sge_timer_cb(unsigned long data)
&adap->sge.qs[0].rspq.lock;
if (spin_trylock_irq(lock)) {
if (!napi_is_scheduled(qs->netdev)) {
+ u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
if (qs->fl[0].credits < qs->fl[0].size)
__refill_fl(adap, &qs->fl[0]);
if (qs->fl[1].credits < qs->fl[1].size)
__refill_fl(adap, &qs->fl[1]);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.credits--;
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
}
spin_unlock_irq(lock);
}
@@ -2431,14 +2636,22 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
- if (ntxq == 1) {
+ if (!is_offload(adapter)) {
+#ifdef USE_RX_PAGE
+ q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
+#endif
q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
sizeof(struct cpl_rx_pkt);
} else {
+#ifdef USE_RX_PAGE
+ q->fl[0].buf_size = RX_PAGE_SIZE;
+#else
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
sizeof(struct cpl_rx_data);
+#endif
q->fl[1].buf_size = (16 * 1024) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -2632,7 +2845,7 @@ void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
q->polling = adap->params.rev > 0;
q->coalesce_usecs = 5;
q->rspq_size = 1024;
- q->fl_size = 4096;
+ q->fl_size = 1024;
q->jumbo_size = 512;
q->txq_size[TXQ_ETH] = 1024;
q->txq_size[TXQ_OFLD] = 1024;
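The refill_fl() rework above carves each allocated page into RX_PAGE_SIZE chunks and balances page references by taking get_page() for every chunk except the last one handed out. The arithmetic, stated on its own for a 4 KB host page:

/* Assumes PAGE_SIZE is a multiple of RX_PAGE_SIZE, per the comment above. */
static unsigned int fl_chunks_per_page(void)
{
	return PAGE_SIZE / RX_PAGE_SIZE;	/* 4096 / 2048 = 2 chunks */
}

Each chunk given to the hardware owns one reference; free_rx_bufs() drops them with put_page(), and the allocator's own reference on a partially carved page is dropped separately at the end of that function.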
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 365a7f5b1f9..fb485d0a43d 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -438,23 +438,23 @@ static const struct adapter_info t3_adap_info[] = {
{2, 0, 0, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
- SUPPORTED_OFFLOAD,
+ 0,
&mi1_mdio_ops, "Chelsio PE9000"},
{2, 0, 0, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
- SUPPORTED_OFFLOAD,
+ 0,
&mi1_mdio_ops, "Chelsio T302"},
{1, 0, 0, 0,
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
- SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T310"},
{2, 0, 0, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
- SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T320"},
};
@@ -681,7 +681,8 @@ enum {
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
- FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
+ FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
+ FW_MIN_SIZE = 8 /* at least version and csum */
};
/**
@@ -884,11 +885,13 @@ int t3_check_fw_version(struct adapter *adapter)
major = G_FW_VERSION_MAJOR(vers);
minor = G_FW_VERSION_MINOR(vers);
- if (type == FW_VERSION_T3 && major == 3 && minor == 1)
+ if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+ minor == FW_VERSION_MINOR)
return 0;
CH_ERR(adapter, "found wrong FW version(%u.%u), "
- "driver needs version 3.1\n", major, minor);
+ "driver needs version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
return -EINVAL;
}
@@ -933,7 +936,7 @@ int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
const u32 *p = (const u32 *)fw_data;
int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
- if (size & 3)
+ if ((size & 3) || size < FW_MIN_SIZE)
return -EINVAL;
if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
return -EFBIG;
@@ -1520,19 +1523,25 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
*/
int t3_phy_intr_handler(struct adapter *adapter)
{
- static const int intr_gpio_bits[] = { 8, 0x20 };
-
+ u32 mask, gpi = adapter_info(adapter)->gpio_intr;
u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
for_each_port(adapter, i) {
- if (cause & intr_gpio_bits[i]) {
- struct cphy *phy = &adap2pinfo(adapter, i)->phy;
- int phy_cause = phy->ops->intr_handler(phy);
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ mask = gpi - (gpi & (gpi - 1));
+ gpi -= mask;
+
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & mask) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
if (phy_cause & cphy_cause_link_change)
t3_link_changed(adapter, i);
if (phy_cause & cphy_cause_fifo_error)
- phy->fifo_errors++;
+ p->phy.fifo_errors++;
}
}
@@ -2897,6 +2906,9 @@ static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
struct adapter *adapter = mc7->adapter;
const struct mc7_timing_params *p = &mc7_timings[mem_type];
+ if (!mc7->size)
+ return 0;
+
val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
slow = val & F_SLOW;
width = G_WIDTH(val);
@@ -3097,8 +3109,10 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
do { /* wait for uP to initialize */
msleep(20);
} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
- if (!attempts)
+ if (!attempts) {
+ CH_ERR(adapter, "uP initialization timed out\n");
goto out_err;
+ }
err = 0;
out_err:
@@ -3198,7 +3212,7 @@ static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
mc7->name = name;
mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
- mc7->size = mc7_calc_size(cfg);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
mc7->width = G_WIDTH(cfg);
}
@@ -3225,6 +3239,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
t3_write_reg(adapter, A_T3DBG_GPIO_EN,
ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+ t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
if (adapter->params.rev == 0 || !uses_xaui(adapter))
val |= F_ENRGMII;
@@ -3241,15 +3256,17 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
}
/*
- * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
+ * Reset the adapter.
+ * Older PCIe cards lose their config space during reset, PCI-X
* ones don't.
*/
int t3_reset_adapter(struct adapter *adapter)
{
- int i;
+ int i, save_and_restore_pcie =
+ adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
uint16_t devid = 0;
- if (is_pcie(adapter))
+ if (save_and_restore_pcie)
pci_save_state(adapter->pdev);
t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
@@ -3267,7 +3284,7 @@ int t3_reset_adapter(struct adapter *adapter)
if (devid != 0x1425)
return -1;
- if (is_pcie(adapter))
+ if (save_and_restore_pcie)
pci_restore_state(adapter->pdev);
return 0;
}
@@ -3321,7 +3338,13 @@ int __devinit t3_prep_adapter(struct adapter *adapter,
p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
p->ntimer_qs = p->cm_size >= (128 << 20) ||
adapter->params.rev > 0 ? 12 : 6;
+ }
+
+ adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
+ t3_mc7_size(&adapter->pmtx) &&
+ t3_mc7_size(&adapter->cm);
+ if (is_offload(adapter)) {
adapter->params.mc5.nservers = DEFAULT_NSERVERS;
adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
DEFAULT_NFILTERS : 0;
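The rewritten t3_phy_intr_handler() walks the adapter's GPIO interrupt bits with mask = gpi - (gpi & (gpi - 1)): since gpi & (gpi - 1) clears the lowest set bit, the subtraction isolates it. A worked example:

/* gpi               = 0b101000
 * gpi & (gpi - 1)   = 0b100000   (lowest set bit cleared)
 * mask = gpi - that = 0b001000   (the lowest set bit)
 */
static inline u32 lowest_set_bit(u32 x)
{
	return x - (x & (x - 1));	/* equivalently: x & -x */
}

Each for_each_port iteration consumes one bit, so ports map to the adapter's GPIO interrupt bits in ascending order.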
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index 9af3bcd64b3..fa4099bc041 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -1,6 +1,5 @@
/*
* Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
- * Copyright (C) 2006-2007 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 2b67dd523cc..042e27e291c 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,5 +35,10 @@
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.0-ko"
+
+/* Firmware version */
+#define FW_VERSION_MAJOR 3
+#define FW_VERSION_MINOR 3
+#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
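A worked example of the new constants: upgrade_fw() in cxgb3_main.c expands FW_FNAME ("t3fw-%d.%d.%d.bin") with these values, so the driver asks request_firmware() for:

	snprintf(buf, sizeof(buf), "t3fw-%d.%d.%d.bin", 3, 3, 0);
	/* -> "t3fw-3.3.0.bin", matching t3_check_fw_version()'s 3.3 check */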
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 907a272ae32..a506792f957 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -124,9 +124,6 @@ int t3_mac_reset(struct cmac *mac)
xaui_serdes_reset(mac);
}
- if (adap->params.rev > 0)
- t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
-
val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
@@ -145,6 +142,58 @@ int t3_mac_reset(struct cmac *mac)
return 0;
}
+int t3b2_mac_reset(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+ u32 val;
+
+ if (!macidx(mac))
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ msleep(10);
+
+ /* Check for xgm Rx fifo empty */
+ if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+ 0x80000000, 1, 5, 2)) {
+ CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+ macidx(mac));
+ return -1;
+ }
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ val = F_MAC_RESET_;
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ msleep(1);
+ t3b_pcs_reset(mac);
+ }
+ t3_write_reg(adap, A_XGM_RX_CFG + oft,
+ F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+ if (!macidx(mac))
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
+ return 0;
+}
+
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
@@ -251,9 +300,11 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
- hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
- hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
- lwm = hwm - 1024;
+ hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
+ MAC_RXFIFO_SIZE * 38 / 100);
+ hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
+ lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
@@ -270,7 +321,15 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
- V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
+ V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
+ V_TXFIFOTHRESH(thres) | V_TXIPG(1));
+
+ if (adap->params.rev > 0)
+ t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
+ (hwm - lwm) * 4 / 8);
+ t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
+ MAC_RXFIFO_SIZE * 4 * 8 / 512);
+
return 0;
}
@@ -298,12 +357,6 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
V_PORTSPEED(M_PORTSPEED), val);
}
- val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
- val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
- if (fc & PAUSE_TX)
- val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
- t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
-
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
(fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
@@ -314,13 +367,28 @@ int t3_mac_enable(struct cmac *mac, int which)
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
-
+ struct mac_stats *s = &mac->stats;
+
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
- t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
+ t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
+ mac->tx_mcnt = s->tx_frames;
+ mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->rx_mcnt = s->rx_frames;
+ mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_RX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->txen = F_TXEN;
+ mac->toggle_cnt = 0;
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
@@ -331,19 +399,102 @@ int t3_mac_disable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
+ int val;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
- t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+ mac->txen = 0;
}
- if (which & MAC_DIRECTION_RX)
+ if (which & MAC_DIRECTION_RX) {
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ msleep(100);
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+ val = F_MAC_RESET_;
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
+ }
return 0;
}
+int t3b2_mac_watchdog_task(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ struct mac_stats *s = &mac->stats;
+ unsigned int tx_tcnt, tx_xcnt;
+ unsigned int tx_mcnt = s->tx_frames;
+ unsigned int rx_mcnt = s->rx_frames;
+ unsigned int rx_xcnt;
+ int status;
+
+ if (tx_mcnt == mac->tx_mcnt) {
+ tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ mac->offset)));
+ if (tx_xcnt == 0) {
+ t3_write_reg(adap, A_TP_PIO_ADDR,
+ A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+ tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ } else {
+ mac->toggle_cnt = 0;
+ return 0;
+ }
+ } else {
+ mac->toggle_cnt = 0;
+ return 0;
+ }
+
+ if (((tx_tcnt != mac->tx_tcnt) &&
+ (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
+ ((mac->tx_mcnt == tx_mcnt) &&
+ (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
+ if (mac->toggle_cnt > 4)
+ status = 2;
+ else
+ status = 1;
+ } else {
+ mac->toggle_cnt = 0;
+ return 0;
+ }
+
+ if (rx_mcnt != mac->rx_mcnt)
+ rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_RX_SPI4_SOP_EOP_CNT +
+ mac->offset)));
+ else
+ return 0;
+
+ if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0)
+ status = 2;
+
+ mac->tx_tcnt = tx_tcnt;
+ mac->tx_xcnt = tx_xcnt;
+ mac->tx_mcnt = s->tx_frames;
+ mac->rx_xcnt = rx_xcnt;
+ mac->rx_mcnt = s->rx_frames;
+ if (status == 1) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ mac->toggle_cnt++;
+ } else if (status == 2) {
+ t3b2_mac_reset(mac);
+ mac->toggle_cnt = 0;
+ }
+ return status;
+}
+
/*
* This function is called periodically to accumulate the current values of the
* RMON counters into the port statistics. Since the packet counters are only
@@ -373,7 +524,11 @@ const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
- mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+
+ v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+ if (mac->adapter->params.rev == T3_REV_B2)
+ v &= 0x7fffffff;
+ mac->stats.rx_too_long += v;
RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);