path: root/drivers/net/cxgb4
author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-23 11:47:02 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-23 11:47:02 -0700
commit    5f05647dd81c11a6a165ccc8f0c1370b16f3bcb0 (patch)
tree      7851ef1c93aa1aba7ef327ca4b75fd35e6d10f29 /drivers/net/cxgb4
parent    02f36038c568111ad4fc433f6fa760ff5e38fab4 (diff)
parent    ec37a48d1d16c30b655ac5280209edf52a6775d4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1699 commits)
  bnx2/bnx2x: Unsupported Ethtool operations should return -EINVAL.
  vlan: Calling vlan_hwaccel_do_receive() is always valid.
  tproxy: use the interface primary IP address as a default value for --on-ip
  tproxy: added IPv6 support to the socket match
  cxgb3: function namespace cleanup
  tproxy: added IPv6 support to the TPROXY target
  tproxy: added IPv6 socket lookup function to nf_tproxy_core
  be2net: Changes to use only priority codes allowed by f/w
  tproxy: allow non-local binds of IPv6 sockets if IP_TRANSPARENT is enabled
  tproxy: added tproxy sockopt interface in the IPV6 layer
  tproxy: added udp6_lib_lookup function
  tproxy: added const specifiers to udp lookup functions
  tproxy: split off ipv6 defragmentation to a separate module
  l2tp: small cleanup
  nf_nat: restrict ICMP translation for embedded header
  can: mcp251x: fix generation of error frames
  can: mcp251x: fix endless loop in interrupt handler if CANINTF_MERRF is set
  can-raw: add msg_flags to distinguish local traffic
  9p: client code cleanup
  rds: make local functions/variables static
  ...

Fix up conflicts in net/core/dev.c, drivers/net/pcmcia/smc91c92_cs.c and
drivers/net/wireless/ath/ath9k/debug.c as per David
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--  drivers/net/cxgb4/cxgb4.h      |  17
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 167
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h  |   6
-rw-r--r--  drivers/net/cxgb4/l2t.c        |  34
-rw-r--r--  drivers/net/cxgb4/l2t.h        |   3
-rw-r--r--  drivers/net/cxgb4/sge.c        |  19
-rw-r--r--  drivers/net/cxgb4/t4_hw.c      | 332
-rw-r--r--  drivers/net/cxgb4/t4_hw.h      |   1
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h   |   5
9 files changed, 96 insertions, 488 deletions
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7..eaa49e4119f 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
u8 counter_val[SGE_NCOUNTERS];
unsigned int starve_thres;
u8 idma_state[2];
+ unsigned int egr_start;
+ unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
@@ -590,7 +592,6 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void *t4_alloc_mem(size_t size);
-void t4_free_mem(void *addr);
void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -649,7 +650,6 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
-void t4_intr_clear(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(struct adapter *adap);
@@ -662,24 +662,16 @@ int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
-int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
- int filter_index, int enable);
-void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
- int filter_index, int *enabled);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
-int t4_read_rss(struct adapter *adapter, u16 *entries);
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
-
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -709,8 +701,6 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
- unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
@@ -729,9 +719,6 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
- unsigned int pf, unsigned int vf, unsigned int iqid,
- unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index e2bf10d90ad..87054e0a574 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0xa000, 0), /* PE10K */
- CH_DEVICE(0x4001, 0),
- CH_DEVICE(0x4002, 0),
- CH_DEVICE(0x4003, 0),
- CH_DEVICE(0x4004, 0),
- CH_DEVICE(0x4005, 0),
- CH_DEVICE(0x4006, 0),
- CH_DEVICE(0x4007, 0),
- CH_DEVICE(0x4008, 0),
- CH_DEVICE(0x4009, 0),
- CH_DEVICE(0x400a, 0),
+ CH_DEVICE(0x4001, -1),
+ CH_DEVICE(0x4002, -1),
+ CH_DEVICE(0x4003, -1),
+ CH_DEVICE(0x4004, -1),
+ CH_DEVICE(0x4005, -1),
+ CH_DEVICE(0x4006, -1),
+ CH_DEVICE(0x4007, -1),
+ CH_DEVICE(0x4008, -1),
+ CH_DEVICE(0x4009, -1),
+ CH_DEVICE(0x400a, -1),
+ CH_DEVICE(0x4401, 4),
+ CH_DEVICE(0x4402, 4),
+ CH_DEVICE(0x4403, 4),
+ CH_DEVICE(0x4404, 4),
+ CH_DEVICE(0x4405, 4),
+ CH_DEVICE(0x4406, 4),
+ CH_DEVICE(0x4407, 4),
+ CH_DEVICE(0x4408, 4),
+ CH_DEVICE(0x4409, 4),
+ CH_DEVICE(0x440a, 4),
{ 0, }
};
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
- struct sge_txq *txq = q->adap->sge.egr_map[qid];
+ struct sge_txq *txq;
+ txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+ if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
}
/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+ qid -= p->ingr_start;
+ return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
* Wait until all NAPI handlers are descheduled.
*/
static void quiesce_rx(struct adapter *adap)
@@ -860,7 +880,7 @@ void *t4_alloc_mem(size_t size)
/*
* Free memory allocated through alloc_mem().
*/
-void t4_free_mem(void *addr)
+static void t4_free_mem(void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
-/*
- * Translate a physical EEPROM address to virtual. The first 1K is accessed
- * through virtual addresses starting at 31K, the rest is accessed through
- * virtual addresses starting at 0. This mapping is correct only for PF0.
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
*/
-static int eeprom_ptov(unsigned int phys_addr)
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
+ fn *= sz;
if (phys_addr < 1024)
return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
if (phys_addr < EEPROMSIZE)
- return phys_addr - 1024;
+ return phys_addr - 1024 - fn;
return -EINVAL;
}
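
[Editorial note, not part of the patch: a quick worked check of the new mapping, assuming PCI function 1 and EEPROMPFSIZE = 1024 as defined in t4_hw.h below. With A = fn * sz = 1024, the three ranges from the comment translate as follows.]

	eeprom_ptov(0,     1, 1024);  /* -> 31744: [0..1K) always maps to [31K..32K) */
	eeprom_ptov(1024,  1, 1024);  /* -> 30720: PF1's private 1K window is [31K-A..31K) */
	eeprom_ptov(2048,  1, 1024);  /* -> 0: the remainder [1K+A..EEPROMSIZE) maps from virtual 0 */
	eeprom_ptov(17408, 1, 1024);  /* -> -EINVAL: past EEPROMSIZE */
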
/*
* The next two routines implement eeprom read/write from physical addresses.
- * The physical->virtual translation is correct only for PF0.
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/*
* RMW possibly needed for first or last words.
@@ -2165,8 +2207,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
* Queue a TID release request and if necessary schedule a work queue to
* process it.
*/
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
- unsigned int tid)
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+ unsigned int tid)
{
void **p = &t->tid_tab[tid];
struct adapter *adap = container_of(t, struct adapter, tids);
@@ -2181,7 +2223,6 @@ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
}
spin_unlock_bh(&adap->tid_release_lock);
}
-EXPORT_SYMBOL(cxgb4_queue_tid_release);
/*
* Process the list of pending TID release requests.
@@ -2305,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->peer_port = htons(0);
req->local_ip = sip;
req->peer_ip = htonl(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2314,48 +2355,6 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
EXPORT_SYMBOL(cxgb4_create_server);
/**
- * cxgb4_create_server6 - create an IPv6 server
- * @dev: the device
- * @stid: the server TID
- * @sip: local IPv6 address to bind server to
- * @sport: the server's TCP port
- * @queue: queue to direct messages from this server to
- *
- * Create an IPv6 server for the given port and address.
- * Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
- const struct in6_addr *sip, __be16 sport,
- unsigned int queue)
-{
- unsigned int chan;
- struct sk_buff *skb;
- struct adapter *adap;
- struct cpl_pass_open_req6 *req;
-
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
- req->local_port = sport;
- req->peer_port = htons(0);
- req->local_ip_hi = *(__be64 *)(sip->s6_addr);
- req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
- req->peer_ip_hi = cpu_to_be64(0);
- req->peer_ip_lo = cpu_to_be64(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
- req->opt0 = cpu_to_be64(TX_CHAN(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
- SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
- return t4_mgmt_tx(adap, skb);
-}
-EXPORT_SYMBOL(cxgb4_create_server6);
-
-/**
* cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
* @mtus: the HW MTU table
* @mtu: the target MTU
@@ -2414,25 +2413,6 @@ unsigned int cxgb4_port_idx(const struct net_device *dev)
}
EXPORT_SYMBOL(cxgb4_port_idx);
-/**
- * cxgb4_netdev_by_hwid - return the net device of a HW port
- * @pdev: identifies the adapter
- * @id: the HW port id
- *
- * Return the net device associated with the interface with the given HW
- * id.
- */
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
-{
- const struct adapter *adap = pci_get_drvdata(pdev);
-
- if (!adap || id >= NCHAN)
- return NULL;
- id = adap->chan_map[id];
- return id < MAX_NPORTS ? adap->port[id] : NULL;
-}
-EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
-
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6)
{
@@ -2722,7 +2702,10 @@ static int cxgb_open(struct net_device *dev)
return err;
}
- dev->real_num_tx_queues = pi->nqsets;
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -3062,12 +3045,16 @@ static int adap_init0(struct adapter *adap)
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
+ params[5] = FW_PARAM_PFVF(IQFLINT_START);
+ params[6] = FW_PARAM_PFVF(EQ_START);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
if (ret < 0)
goto bye;
port_vec = val[0];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
+ adap->sge.ingr_start = val[5];
+ adap->sge.egr_start = val[6];
if (c.ofldcaps) {
/* query offload-related parameters */
@@ -3815,7 +3802,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- } else if (PCI_FUNC(pdev->devfn) > 0)
+ } else
pci_release_regions(pdev);
}
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 85d74e751ce..1b48c017014 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -139,16 +139,11 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
- unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, unsigned int queue);
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
- const struct in6_addr *sip, __be16 sport,
- unsigned int queue);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
@@ -233,7 +228,6 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index e8f0f55e9d0..a2d323c473f 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -481,40 +481,6 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
-/*
- * Allocate an L2T entry for use by a switching rule. Such entries need to be
- * explicitly freed and while busy they are not on any hash chain, so normal
- * address resolution updates do not see them.
- */
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
-{
- struct l2t_entry *e;
-
- write_lock_bh(&d->lock);
- e = alloc_l2e(d);
- if (e) {
- spin_lock(&e->lock); /* avoid race with t4_l2t_free */
- e->state = L2T_STATE_SWITCHING;
- atomic_set(&e->refcnt, 1);
- spin_unlock(&e->lock);
- }
- write_unlock_bh(&d->lock);
- return e;
-}
-
-/*
- * Sets/updates the contents of a switching L2T entry that has been allocated
- * with an earlier call to @t4_l2t_alloc_switching.
- */
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
- u8 port, u8 *eth_addr)
-{
- e->vlan = vlan;
- e->lport = port;
- memcpy(e->dmac, eth_addr, ETH_ALEN);
- return write_l2e(adap, e, 0);
-}
-
struct l2t_data *t4_init_l2t(void)
{
int i;
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 643f27ed3cf..7bd8f42378f 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -100,9 +100,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
-struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
-int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
- u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc5756..9967f3debce 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out: cred = q->avail - cred;
if (unlikely(fl_starving(q))) {
smp_wmb();
- set_bit(q->cntxt_id, adap->sge.starving_fl);
+ set_bit(q->cntxt_id - adap->sge.egr_start,
+ adap->sge.starving_fl);
}
return cred;
@@ -974,7 +975,7 @@ out_free: dev_kfree_skb(skb);
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
{
q->mapping_err++;
q->q.stops++;
- set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+ set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+ q->adap->sge.txq_maperr);
}
/**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.rx_cso++;
}
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
+ qid -= adap->sge.ingr_start;
napi_schedule(&adap->sge.ingr_map[qid]->napi);
}
@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
- adap->sge.ingr_map[iq->cntxt_id] = iq;
+ adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
if (fl) {
fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
- adap->sge.egr_map[fl->cntxt_id] = fl;
+ adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
q->cntxt_id = id;
- adap->sge.egr_map[id] = q;
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
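
[Editorial note, not part of the patch: the subtraction of egr_start/ingr_start above is the pattern this series introduces throughout sge.c and cxgb4_main.c — firmware now hands each PF queue context IDs starting at a nonzero base, queried via FW_PARAM_PFVF(EQ_START) and FW_PARAM_PFVF(IQFLINT_START) in adap_init0(), so every map access converts the absolute qid to a zero-based slot. A minimal sketch of such a lookup; lookup_egr_txq is a hypothetical helper name, not a driver function.]

	static inline struct sge_txq *lookup_egr_txq(struct adapter *adap,
						     unsigned int abs_qid)
	{
		/* egr_map[] is indexed from 0; hardware qids start at egr_start */
		return adap->sge.egr_map[abs_qid - adap->sge.egr_start];
	}
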
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
{
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
- adap->sge.ingr_map[rq->cntxt_id] = NULL;
+ adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index 9e1a4b49b47..bb813d94aea 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -120,30 +120,6 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
-#if 0
-/**
- * t4_write_indirect - write indirectly addressed registers
- * @adap: the adapter
- * @addr_reg: register holding the indirect addresses
- * @data_reg: register holding the value for the indirect registers
- * @vals: values to write
- * @nregs: how many indirect registers to write
- * @start_idx: address of first indirect register to write
- *
- * Writes a sequential block of registers that are accessed indirectly
- * through an address/data register pair.
- */
-static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, const u32 *vals,
- unsigned int nregs, unsigned int start_idx)
-{
- while (nregs--) {
- t4_write_reg(adap, addr_reg, start_idx++);
- t4_write_reg(adap, data_reg, *vals++);
- }
-}
-#endif
-
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
@@ -1560,44 +1536,6 @@ void t4_intr_disable(struct adapter *adapter)
}
/**
- * t4_intr_clear - clear all interrupts
- * @adapter: the adapter whose interrupts should be cleared
- *
- * Clears all interrupts. The caller must be a PCI function managing
- * global interrupts.
- */
-void t4_intr_clear(struct adapter *adapter)
-{
- static const unsigned int cause_reg[] = {
- SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
- MC_INT_CAUSE,
- MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
- EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
- CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
- MYPF_REG(CIM_PF_HOST_INT_CAUSE),
- TP_INT_CAUSE,
- ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
- PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
- MPS_RX_PERR_INT_CAUSE,
- CPL_INTR_CAUSE,
- MYPF_REG(PL_PF_INT_CAUSE),
- PL_PL_INT_CAUSE,
- LE_DB_INT_CAUSE,
- };
-
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
- t4_write_reg(adapter, cause_reg[i], 0xffffffff);
-
- t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
- (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
-}
-
-/**
* hash_mac_addr - return the hash value of a MAC address
* @addr: the 48-bit Ethernet MAC address
*
@@ -1709,36 +1647,6 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
-/* Read an RSS table row */
-static int rd_rss_row(struct adapter *adap, int row, u32 *val)
-{
- t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
- return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
- 5, 0, val);
-}
-
-/**
- * t4_read_rss - read the contents of the RSS mapping table
- * @adapter: the adapter
- * @map: holds the contents of the RSS mapping table
- *
- * Reads the contents of the RSS hash->queue mapping table.
- */
-int t4_read_rss(struct adapter *adapter, u16 *map)
-{
- u32 val;
- int i, ret;
-
- for (i = 0; i < RSS_NENTRIES / 2; ++i) {
- ret = rd_rss_row(adapter, i, &val);
- if (ret)
- return ret;
- *map++ = LKPTBLQUEUE0_GET(val);
- *map++ = LKPTBLQUEUE1_GET(val);
- }
- return 0;
-}
-
/**
* t4_tp_get_tcp_stats - read TP's TCP MIB counters
* @adap: the adapter
@@ -1779,29 +1687,6 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
}
/**
- * t4_tp_get_err_stats - read TP's error MIB counters
- * @adap: the adapter
- * @st: holds the counter values
- *
- * Returns the values of TP's error counters.
- */
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
-{
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
- 12, TP_MIB_MAC_IN_ERR_0);
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
- 8, TP_MIB_TNL_CNG_DROP_0);
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
- 4, TP_MIB_TNL_DROP_0);
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
- 4, TP_MIB_OFD_VLN_DROP_0);
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
- 4, TP_MIB_TCP_V6IN_ERR_0);
- t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
- 2, TP_MIB_OFD_ARP_DROP);
-}
-
-/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
@@ -1916,122 +1801,6 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
}
/**
- * t4_set_trace_filter - configure one of the tracing filters
- * @adap: the adapter
- * @tp: the desired trace filter parameters
- * @idx: which filter to configure
- * @enable: whether to enable or disable the filter
- *
- * Configures one of the tracing filters available in HW. If @enable is
- * %0 @tp is not examined and may be %NULL.
- */
-int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
- int idx, int enable)
-{
- int i, ofst = idx * 4;
- u32 data_reg, mask_reg, cfg;
- u32 multitrc = TRCMULTIFILTER;
-
- if (!enable) {
- t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
- goto out;
- }
-
- if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
- tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
- tp->snap_len > 9600 || (idx && tp->snap_len > 256))
- return -EINVAL;
-
- if (tp->snap_len > 256) { /* must be tracer 0 */
- if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
- t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
- t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
- return -EINVAL; /* other tracers are enabled */
- multitrc = 0;
- } else if (idx) {
- i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
- if (TFCAPTUREMAX_GET(i) > 256 &&
- (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
- return -EINVAL;
- }
-
- /* stop the tracer we'll be changing */
- t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
-
- /* disable tracing globally if running in the wrong single/multi mode */
- cfg = t4_read_reg(adap, MPS_TRC_CFG);
- if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
- t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
- t4_read_reg(adap, MPS_TRC_CFG); /* flush */
- msleep(1);
- if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
- return -ETIMEDOUT;
- }
- /*
- * At this point either the tracing is enabled and in the right mode or
- * disabled.
- */
-
- idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
- data_reg = MPS_TRC_FILTER0_MATCH + idx;
- mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
-
- for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
- t4_write_reg(adap, data_reg, tp->data[i]);
- t4_write_reg(adap, mask_reg, ~tp->mask[i]);
- }
- t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
- TFCAPTUREMAX(tp->snap_len) |
- TFMINPKTSIZE(tp->min_len));
- t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
- TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
- TFPORT(tp->port) | TFEN |
- (tp->invert ? TFINVERTMATCH : 0));
-
- cfg &= ~TRCMULTIFILTER;
- t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
-out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
- return 0;
-}
-
-/**
- * t4_get_trace_filter - query one of the tracing filters
- * @adap: the adapter
- * @tp: the current trace filter parameters
- * @idx: which trace filter to query
- * @enabled: non-zero if the filter is enabled
- *
- * Returns the current settings of one of the HW tracing filters.
- */
-void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
- int *enabled)
-{
- u32 ctla, ctlb;
- int i, ofst = idx * 4;
- u32 data_reg, mask_reg;
-
- ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
- ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
-
- *enabled = !!(ctla & TFEN);
- tp->snap_len = TFCAPTUREMAX_GET(ctlb);
- tp->min_len = TFMINPKTSIZE_GET(ctlb);
- tp->skip_ofst = TFOFFSET_GET(ctla);
- tp->skip_len = TFLENGTH_GET(ctla);
- tp->invert = !!(ctla & TFINVERTMATCH);
- tp->port = TFPORT_GET(ctla);
-
- ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
- data_reg = MPS_TRC_FILTER0_MATCH + ofst;
- mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
-
- for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
- tp->mask[i] = ~t4_read_reg(adap, mask_reg);
- tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
- }
-}
-
-/**
* get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -2133,52 +1902,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
}
/**
- * t4_get_lb_stats - collect loopback port statistics
- * @adap: the adapter
- * @idx: the loopback port index
- * @p: the stats structure to fill
- *
- * Return HW statistics for the given loopback port.
- */
-void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
-{
- u32 bgmap = get_mps_bg_map(adap, idx);
-
-#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
-#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
-
- p->octets = GET_STAT(BYTES);
- p->frames = GET_STAT(FRAMES);
- p->bcast_frames = GET_STAT(BCAST);
- p->mcast_frames = GET_STAT(MCAST);
- p->ucast_frames = GET_STAT(UCAST);
- p->error_frames = GET_STAT(ERROR);
-
- p->frames_64 = GET_STAT(64B);
- p->frames_65_127 = GET_STAT(65B_127B);
- p->frames_128_255 = GET_STAT(128B_255B);
- p->frames_256_511 = GET_STAT(256B_511B);
- p->frames_512_1023 = GET_STAT(512B_1023B);
- p->frames_1024_1518 = GET_STAT(1024B_1518B);
- p->frames_1519_max = GET_STAT(1519B_MAX);
- p->drop = t4_read_reg(adap, PORT_REG(idx,
- MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
-
- p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
- p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
- p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
- p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
- p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
- p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
- p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
- p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
-#undef GET_STAT_COM
-}
-
-/**
* t4_wol_magic_enable - enable/disable magic packet WoL
* @adap: the adapter
* @port: the physical port index
@@ -2584,30 +2307,6 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
/**
- * t4_free_vi - free a virtual interface
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @pf: the PF owning the VI
- * @vf: the VF owning the VI
- * @viid: virtual interface identifiler
- *
- * Free a previously allocated virtual interface.
- */
-int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
- unsigned int vf, unsigned int viid)
-{
- struct fw_vi_cmd c;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
- FW_VI_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
- c.type_viid = htons(FW_VI_CMD_VIID(viid));
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-}
-
-/**
* t4_set_rxmode - set Rx properties of a virtual interface
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -2833,37 +2532,6 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
- * t4_iq_start_stop - enable/disable an ingress queue and its FLs
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @start: %true to enable the queues, %false to disable them
- * @pf: the PF owning the queues
- * @vf: the VF owning the queues
- * @iqid: ingress queue id
- * @fl0id: FL0 queue id or 0xffff if no attached FL0
- * @fl1id: FL1 queue id or 0xffff if no attached FL1
- *
- * Starts or stops an ingress queue and its associated FLs, if any.
- */
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
- unsigned int pf, unsigned int vf, unsigned int iqid,
- unsigned int fl0id, unsigned int fl1id)
-{
- struct fw_iq_cmd c;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
- FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
- FW_IQ_CMD_VFN(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
- FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
- c.iqid = htons(iqid);
- c.fl0id = htons(fl0id);
- c.fl1id = htons(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
-}
-
-/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a05556577..c26b455f37d 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
EEPROMSIZE = 17408, /* Serial EEPROM physical size */
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b..940584a8a64 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+ FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+ FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+ FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
};
/*