Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--	drivers/net/cxgb3/adapter.h        |  18
-rw-r--r--	drivers/net/cxgb3/common.h         |   1
-rw-r--r--	drivers/net/cxgb3/cxgb3_ctl_defs.h |   5
-rw-r--r--	drivers/net/cxgb3/cxgb3_ioctl.h    |   1
-rw-r--r--	drivers/net/cxgb3/cxgb3_main.c     |  19
-rw-r--r--	drivers/net/cxgb3/cxgb3_offload.c  |  32
-rw-r--r--	drivers/net/cxgb3/l2t.c            |   2
-rw-r--r--	drivers/net/cxgb3/regs.h           |  10
-rw-r--r--	drivers/net/cxgb3/sge.c            | 391
-rw-r--r--	drivers/net/cxgb3/t3_cpl.h         |  51
-rw-r--r--	drivers/net/cxgb3/t3cdev.h         |   4
11 files changed, 469 insertions, 65 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index acebe431d06..271140433b0 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,6 +42,7 @@
 #include <linux/cache.h>
 #include <linux/mutex.h>
 #include <linux/bitops.h>
+#include <linux/inet_lro.h>
 #include "t3cdev.h"
 #include <asm/io.h>
 
@@ -92,6 +93,7 @@ struct sge_fl {                     /* SGE per free-buffer list state */
 	unsigned int gen;	/* free list generation */
 	struct fl_pg_chunk pg_chunk;/* page chunk cache */
 	unsigned int use_pages;	/* whether FL uses pages or sk_buffs */
+	unsigned int order;	/* order of page allocations */
 	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
 	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
 	dma_addr_t phys_addr;	/* physical address of HW ring start */
@@ -116,12 +118,15 @@ struct sge_rspq {		/* state for an SGE response queue */
 	unsigned int polling;	/* is the queue serviced through NAPI? */
 	unsigned int holdoff_tmr;	/* interrupt holdoff timer in 100ns */
 	unsigned int next_holdoff;	/* holdoff time for next interrupt */
+	unsigned int rx_recycle_buf; /* whether recycling occurred
+					within current sop-eop */
 	struct rsp_desc *desc;	/* address of HW response ring */
 	dma_addr_t phys_addr;	/* physical address of the ring */
 	unsigned int cntxt_id;	/* SGE context id for the response q */
 	spinlock_t lock;	/* guards response processing */
 	struct sk_buff *rx_head;	/* offload packet receive queue head */
 	struct sk_buff *rx_tail;	/* offload packet receive queue tail */
+	struct sk_buff *pg_skb; /* used to build frag list in napi handler */
 
 	unsigned long offload_pkts;
 	unsigned long offload_bundles;
@@ -169,16 +174,29 @@ enum {				/* per port SGE statistics */
 	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
 	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
 	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
+	SGE_PSTAT_LRO_AGGR,	/* # of page chunks added to LRO sessions */
+	SGE_PSTAT_LRO_FLUSHED,	/* # of flushed LRO sessions */
+	SGE_PSTAT_LRO_NO_DESC,	/* # of overflown LRO sessions */
 
 	SGE_PSTAT_MAX		/* must be last */
 };
 
+#define T3_MAX_LRO_SES 8
+#define T3_MAX_LRO_MAX_PKTS 64
+
 struct sge_qset {		/* an SGE queue set */
 	struct adapter *adap;
 	struct napi_struct napi;
 	struct sge_rspq rspq;
 	struct sge_fl fl[SGE_RXQ_PER_SET];
 	struct sge_txq txq[SGE_TXQ_PER_SET];
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
+	struct skb_frag_struct *lro_frag_tbl;
+	int lro_nfrags;
+	int lro_enabled;
+	int lro_frag_len;
+	void *lro_va;
 	struct net_device *netdev;
 	unsigned long txq_stopped;	/* which Tx queues are stopped */
 	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 8e8ebd78853..9ecf8a6dc97 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -351,6 +351,7 @@ struct tp_params {
 
 struct qset_params {		/* SGE queue set parameters */
 	unsigned int polling;	/* polling/interrupt service for rspq */
+	unsigned int lro;	/* large receive offload */
 	unsigned int coalesce_usecs;	/* irq coalescing timer */
 	unsigned int rspq_size;	/* # of entries in response queue */
 	unsigned int fl_size;	/* # of entries in regular free list */
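The new per-free-list `order` field lets FL1 serve RX buffers larger than one page by carving them out of higher-order allocations. A standalone sketch of the resulting capacity arithmetic; the 4 KB page size and the FL1 values are assumptions taken from the sge.c constants later in this patch, not driver code:

    /*
     * Sketch only, not driver code: capacity of one free-list page chunk.
     * Assumes PAGE_SIZE is 4096, so FL1 uses 8 KB chunks at order 1.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096;      /* assumed PAGE_SIZE */
        unsigned int order = 1;             /* FL1_PG_ORDER for 4 KB pages */
        unsigned int buf_size = 8192;       /* FL1_PG_CHUNK_SIZE */
        unsigned int chunk = page_size << order;

        printf("chunk = %u bytes -> %u buffer(s) per allocation\n",
               chunk, chunk / buf_size);
        return 0;
    }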
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
index ed0ecd9679c..6ad92405d9a 100644
--- a/drivers/net/cxgb3/cxgb3_ctl_defs.h
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -111,10 +111,7 @@ struct ulp_iscsi_info {
 	unsigned int llimit;
 	unsigned int ulimit;
 	unsigned int tagmask;
-	unsigned int pgsz3;
-	unsigned int pgsz2;
-	unsigned int pgsz1;
-	unsigned int pgsz0;
+	u8 pgsz_factor[4];
 	unsigned int max_rxsz;
 	unsigned int max_txsz;
 	struct pci_dev *pdev;
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
index 0a82fcddf2d..68200a14065 100644
--- a/drivers/net/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -90,6 +90,7 @@ struct ch_qset_params {
 	int32_t fl_size[2];
 	int32_t intr_lat;
 	int32_t polling;
+	int32_t lro;
 	int32_t cong_thres;
 };
 
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 3a312721679..5447f3e60f0 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1212,6 +1212,9 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
 	"VLANinsertions     ",
 	"TxCsumOffload      ",
 	"RxCsumGood         ",
+	"LroAggregated      ",
+	"LroFlushed         ",
+	"LroNoDesc          ",
 	"RxDrops            ",
 
 	"CheckTXEnToggled   ",
@@ -1340,6 +1343,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
+	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
 	*data++ = s->rx_cong_drops;
 
 	*data++ = s->num_toggled;
@@ -1558,6 +1564,13 @@ static int set_rx_csum(struct net_device *dev, u32 data)
 	struct port_info *p = netdev_priv(dev);
 
 	p->rx_csum_offload = data;
+	if (!data) {
+		struct adapter *adap = p->adapter;
+		int i;
+
+		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
+			adap->sge.qs[i].lro_enabled = 0;
+	}
 	return 0;
 }
 
@@ -1830,6 +1843,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 				}
 			}
 		}
+		if (t.lro >= 0) {
+			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
+			q->lro = t.lro;
+			qs->lro_enabled = t.lro;
+		}
 		break;
 	}
 	case CHELSIO_GET_QSET_PARAMS:{
@@ -1849,6 +1867,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		t.fl_size[0] = q->fl_size;
 		t.fl_size[1] = q->jumbo_size;
 		t.polling = q->polling;
+		t.lro = q->lro;
 		t.intr_lat = q->coalesce_usecs;
 		t.cong_thres = q->cong_thres;
 
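The ioctl plumbing above treats the new `t.lro` field as a tri-state: a negative value leaves the queue setting untouched, 0/1 set it explicitly. A minimal userspace sketch of that convention; the struct and function names are hypothetical:

    /*
     * Sketch of the signed "leave unchanged" ioctl convention used above.
     * Only the (lro >= 0) test mirrors the driver; everything else is
     * illustrative.
     */
    #include <stdio.h>

    struct qset_request { int lro; };   /* -1 = don't touch, 0/1 = set */

    static void apply(const struct qset_request *req, int *lro_enabled)
    {
        if (req->lro >= 0)              /* only act on explicit values */
            *lro_enabled = req->lro;
    }

    int main(void)
    {
        int lro_enabled = 1;
        struct qset_request keep = { -1 }, off = { 0 };

        apply(&keep, &lro_enabled);
        printf("after keep: %d\n", lro_enabled);    /* still 1 */
        apply(&off, &lro_enabled);
        printf("after off:  %d\n", lro_enabled);    /* now 0 */
        return 0;
    }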
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index cf269687379..c5b3de1bb45 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -207,6 +207,17 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
 		break;
 	case ULP_ISCSI_SET_PARAMS:
 		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+		/* set MaxRxData and MaxCoalesceSize to 16224 */
+		t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
+		/* program the ddp page sizes */
+		{
+			int i;
+			unsigned int val = 0;
+			for (i = 0; i < 4; i++)
+				val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+			if (val)
+				t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+		}
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -1255,6 +1266,25 @@ static inline void unregister_tdev(struct t3cdev *tdev)
 	mutex_unlock(&cxgb3_db_lock);
 }
 
+static inline int adap2type(struct adapter *adapter)
+{
+	int type = 0;
+
+	switch (adapter->params.rev) {
+	case T3_REV_A:
+		type = T3A;
+		break;
+	case T3_REV_B:
+	case T3_REV_B2:
+		type = T3B;
+		break;
+	case T3_REV_C:
+		type = T3C;
+		break;
+	}
+	return type;
+}
+
 void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
 {
 	struct t3cdev *tdev = &adapter->tdev;
@@ -1264,7 +1294,7 @@ void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
 	cxgb3_set_dummy_ops(tdev);
 	tdev->send = t3_offload_tx;
 	tdev->ctl = cxgb_offload_ctl;
-	tdev->type = adapter->params.rev == 0 ? T3A : T3B;
+	tdev->type = adap2type(adapter);
 
 	register_tdev(tdev);
 }
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index f510140885a..825e510bd9e 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -337,7 +337,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
 	atomic_set(&e->refcnt, 1);
 	neigh_replace(e, neigh);
 	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
-		e->vlan = vlan_dev_info(neigh->dev)->vlan_id;
+		e->vlan = vlan_dev_vlan_id(neigh->dev);
 	else
 		e->vlan = VLAN_NONE;
 	spin_unlock(&e->lock);
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 56717887934..4bda27c551c 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1517,16 +1517,18 @@
 
 #define A_ULPRX_ISCSI_TAGMASK 0x514
 
-#define S_HPZ0    0
-#define M_HPZ0    0xf
-#define V_HPZ0(x) ((x) << S_HPZ0)
-#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+#define A_ULPRX_ISCSI_PSZ 0x518
 
 #define A_ULPRX_TDDP_LLIMIT 0x51c
 
 #define A_ULPRX_TDDP_ULIMIT 0x520
 
 #define A_ULPRX_TDDP_PSZ 0x528
 
+#define S_HPZ0    0
+#define M_HPZ0    0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
 #define A_ULPRX_STAG_LLIMIT 0x52c
 
 #define A_ULPRX_STAG_ULIMIT 0x530
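cxgb3_offload.c above packs four 4-bit DDP page-size factors into one 32-bit word, one factor per byte, before writing the new A_ULPRX_ISCSI_PSZ register declared in regs.h. A standalone sketch of the packing; the sample factors are made up:

    /*
     * Sketch of the A_ULPRX_ISCSI_PSZ packing done in cxgb3_offload.c:
     * four 4-bit page-size factors, one per byte of a 32-bit value.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned char pgsz_factor[4] = { 0, 1, 2, 4 };  /* example factors */
        unsigned int val = 0;
        int i;

        for (i = 0; i < 4; i++)
            val |= (pgsz_factor[i] & 0xF) << (8 * i);

        printf("A_ULPRX_ISCSI_PSZ <- 0x%08x\n", val);   /* 0x04020100 */
        return 0;
    }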
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 796eb305cdc..a96331c875e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -55,6 +55,9 @@
  * directly.
  */
 #define FL0_PG_CHUNK_SIZE  2048
+#define FL0_PG_ORDER 0
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
 
 #define SGE_RX_DROP_THRES 16
 
@@ -359,7 +362,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 	}
 
 	if (q->pg_chunk.page) {
-		__free_page(q->pg_chunk.page);
+		__free_pages(q->pg_chunk.page, q->order);
 		q->pg_chunk.page = NULL;
 	}
 }
@@ -376,13 +379,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(void *va, unsigned int len,
-				  struct rx_desc *d, struct rx_sw_desc *sd,
-				  unsigned int gen, struct pci_dev *pdev)
+static inline int add_one_rx_buf(void *va, unsigned int len,
+				 struct rx_desc *d, struct rx_sw_desc *sd,
+				 unsigned int gen, struct pci_dev *pdev)
 {
 	dma_addr_t mapping;
 
 	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(mapping)))
+		return -ENOMEM;
+
 	pci_unmap_addr_set(sd, dma_addr, mapping);
 
 	d->addr_lo = cpu_to_be32(mapping);
@@ -390,12 +396,14 @@ static inline void add_one_rx_buf(void *va, unsigned int len,
 	wmb();
 	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+	return 0;
 }
 
-static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
+			  unsigned int order)
 {
 	if (!q->pg_chunk.page) {
-		q->pg_chunk.page = alloc_page(gfp);
+		q->pg_chunk.page = alloc_pages(gfp, order);
 		if (unlikely(!q->pg_chunk.page))
 			return -ENOMEM;
 		q->pg_chunk.va = page_address(q->pg_chunk.page);
@@ -404,7 +412,7 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
 	sd->pg_chunk = q->pg_chunk;
 
 	q->pg_chunk.offset += q->buf_size;
-	if (q->pg_chunk.offset == PAGE_SIZE)
+	if (q->pg_chunk.offset == (PAGE_SIZE << order))
 		q->pg_chunk.page = NULL;
 	else {
 		q->pg_chunk.va += q->buf_size;
@@ -424,15 +432,18 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
  *	allocated with the supplied gfp flags.  The caller must assure that
  *	@n does not exceed the queue's capacity.
  */
-static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
 	void *buf_start;
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
+	unsigned int count = 0;
 
 	while (n--) {
+		int err;
+
 		if (q->use_pages) {
-			if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
 nomem:				q->alloc_failed++;
 				break;
 			}
@@ -447,8 +458,16 @@ nomem:				q->alloc_failed++;
 			buf_start = skb->data;
 		}
 
-		add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
-			       adap->pdev);
+		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+				     adap->pdev);
+		if (unlikely(err)) {
+			if (!q->use_pages) {
+				kfree_skb(sd->skb);
+				sd->skb = NULL;
+			}
+			break;
+		}
+
 		d++;
 		sd++;
 		if (++q->pidx == q->size) {
@@ -458,14 +477,19 @@ nomem:				q->alloc_failed++;
 			d = q->desc;
 		}
 		q->credits++;
+		count++;
 	}
 	wmb();
-	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+	if (likely(count))
+		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+	return count;
 }
 
 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
 {
-	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
+	refill_fl(adap, fl, min(16U, fl->size - fl->credits),
+		  GFP_ATOMIC | __GFP_COMP);
 }
 
 /**
@@ -560,6 +584,8 @@ static void t3_reset_qset(struct sge_qset *q)
 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
 	q->txq_stopped = 0;
 	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+	kfree(q->lro_frag_tbl);
+	q->lro_nfrags = q->lro_frag_len = 0;
 }
 
 
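alloc_pg_chunk() above carves one higher-order allocation into buf_size pieces and only goes back to the page allocator once the cached chunk is exhausted. A userspace sketch of that carving, with malloc standing in for alloc_pages:

    /*
     * Sketch of the alloc_pg_chunk() carving strategy; not driver code.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct pg_chunk { char *page, *va; unsigned int offset; };

    /* hand out buf_size bytes from a cached chunk_size allocation */
    static void *carve(struct pg_chunk *c, unsigned int buf_size,
                       unsigned int chunk_size)
    {
        void *buf;

        if (!c->page) {                 /* cache empty: allocate a chunk */
            c->page = malloc(chunk_size);
            if (!c->page)
                return NULL;
            c->va = c->page;
            c->offset = 0;
        }
        buf = c->va;
        c->offset += buf_size;
        if (c->offset == chunk_size)    /* fully carved: drop the cache */
            c->page = NULL;
        else
            c->va += buf_size;
        return buf;
    }

    int main(void)
    {
        struct pg_chunk c = { 0 };
        int i;

        /* 8 KB chunk, 2 KB buffers: a new chunk only every 4th call */
        for (i = 0; i < 5; i++)
            printf("buffer %d at %p\n", i, carve(&c, 2048, 8192));
        free(c.page);   /* demo cleanup; in the driver the FL owns the rest */
        return 0;
    }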
@@ -740,19 +766,22 @@ use_orig_buf:
  *	that are page chunks rather than sk_buffs.
  */
 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
-				     unsigned int len, unsigned int drop_thres)
+				     struct sge_rspq *q, unsigned int len,
+				     unsigned int drop_thres)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *newskb, *skb;
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-	if (len <= SGE_RX_COPY_THRES) {
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			__skb_put(skb, len);
+	newskb = skb = q->pg_skb;
+
+	if (!skb && (len <= SGE_RX_COPY_THRES)) {
+		newskb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(newskb != NULL)) {
+			__skb_put(newskb, len);
 			pci_dma_sync_single_for_cpu(adap->pdev,
 					    pci_unmap_addr(sd, dma_addr), len,
 					    PCI_DMA_FROMDEVICE);
-			memcpy(skb->data, sd->pg_chunk.va, len);
+			memcpy(newskb->data, sd->pg_chunk.va, len);
 			pci_dma_sync_single_for_device(adap->pdev,
 					    pci_unmap_addr(sd, dma_addr), len,
 					    PCI_DMA_FROMDEVICE);
@@ -761,14 +790,16 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 recycle:
 		fl->credits--;
 		recycle_rx_buf(adap, fl, fl->cidx);
-		return skb;
+		q->rx_recycle_buf++;
+		return newskb;
 	}
 
-	if (unlikely(fl->credits <= drop_thres))
+	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
 		goto recycle;
-	skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
-	if (unlikely(!skb)) {
+	if (!skb)
+		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+	if (unlikely(!newskb)) {
 		if (!drop_thres)
 			return NULL;
 		goto recycle;
 	}
@@ -776,21 +807,29 @@ recycle:
 
 	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
 			 fl->buf_size, PCI_DMA_FROMDEVICE);
-	__skb_put(skb, SGE_RX_PULL_LEN);
-	memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
-	skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
-			   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
-			   len - SGE_RX_PULL_LEN);
-	skb->len = len;
-	skb->data_len = len - SGE_RX_PULL_LEN;
-	skb->truesize += skb->data_len;
+	if (!skb) {
+		__skb_put(newskb, SGE_RX_PULL_LEN);
+		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+				   len - SGE_RX_PULL_LEN);
+		newskb->len = len;
+		newskb->data_len = len - SGE_RX_PULL_LEN;
+	} else {
+		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+				   sd->pg_chunk.page,
+				   sd->pg_chunk.offset, len);
+		newskb->len += len;
+		newskb->data_len += len;
+	}
+	newskb->truesize += newskb->data_len;
 
 	fl->credits--;
 	/*
 	 * We do not refill FLs here, we let the caller do it to overlap a
 	 * prefetch.
 	 */
-	return skb;
+	return newskb;
 }
 
 /**
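get_packet_pg() above copies small single-buffer packets into a fresh skb so the page chunk can be recycled immediately, while larger packets and every sop-eop continuation buffer are attached as page fragments. A sketch of that copybreak decision; the 256-byte threshold is an assumed stand-in for SGE_RX_COPY_THRES, and the recycle paths are omitted:

    /*
     * Sketch of the receive copybreak policy in get_packet_pg().
     */
    #include <stdio.h>

    enum rx_path { RX_COPY, RX_FRAG };

    static enum rx_path classify(unsigned int len, int in_progress)
    {
        /* mid sop-eop buffers always extend the frag list, never copy */
        if (!in_progress && len <= 256)
            return RX_COPY;
        return RX_FRAG;
    }

    int main(void)
    {
        printf("64B first buffer:   %s\n",
               classify(64, 0) == RX_COPY ? "copy" : "frag");
        printf("1500B first buffer: %s\n",
               classify(1500, 0) == RX_COPY ? "copy" : "frag");
        printf("64B continuation:   %s\n",
               classify(64, 1) == RX_COPY ? "copy" : "frag");
        return 0;
    }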
@@ -1831,9 +1870,10 @@ static void restart_tx(struct sge_qset *qs)
  *	if it was immediate data in a response.
  */
 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
-		   struct sk_buff *skb, int pad)
+		   struct sk_buff *skb, int pad, int lro)
 {
 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+	struct sge_qset *qs = rspq_to_qset(rq);
 	struct port_info *pi;
 
 	skb_pull(skb, sizeof(*p) + pad);
@@ -1850,18 +1890,202 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	if (unlikely(p->vlan_valid)) {
 		struct vlan_group *grp = pi->vlan_grp;
 
-		rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
+		qs->port_stats[SGE_PSTAT_VLANEX]++;
 		if (likely(grp))
-			__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
-					  rq->polling);
+			if (lro)
+				lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
+							     grp,
+							     ntohs(p->vlan),
+							     p);
+			else
+				__vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
+						  rq->polling);
 		else
 			dev_kfree_skb_any(skb);
-	} else if (rq->polling)
-		netif_receive_skb(skb);
-	else
+	} else if (rq->polling) {
+		if (lro)
+			lro_receive_skb(&qs->lro_mgr, skb, p);
+		else
+			netif_receive_skb(skb);
+	} else
 		netif_rx(skb);
 }
 
+static inline int is_eth_tcp(u32 rss)
+{
+	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ *	lro_frame_ok - check if an ingress packet is eligible for LRO
+ *	@p: the CPL header of the packet
+ *
+ *	Returns true if a received packet is eligible for LRO.
+ *	The following conditions must be true:
+ *	- packet is TCP/IP Ethernet II (checked elsewhere)
+ *	- not an IP fragment
+ *	- no IP options
+ *	- TCP/IP checksums are correct
+ *	- the packet is for this host
+ */
+static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
+{
+	const struct ethhdr *eh = (struct ethhdr *)(p + 1);
+	const struct iphdr *ih = (struct iphdr *)(eh + 1);
+
+	return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
+	       eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
+}
+
+#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
+		       TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
+		       TCP_FLAG_SYN | TCP_FLAG_FIN)
+#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
+		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
+
+/**
+ *	lro_segment_ok - check if a TCP segment is eligible for LRO
+ *	@tcph: the TCP header of the packet
+ *
+ *	Returns true if a TCP packet is eligible for LRO.  This requires that
+ *	the packet have only the ACK flag set and no TCP options besides
+ *	time stamps.
+ */
+static inline int lro_segment_ok(const struct tcphdr *tcph)
+{
+	int optlen;
+
+	if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
+		return 0;
+
+	optlen = (tcph->doff << 2) - sizeof(*tcph);
+	if (optlen) {
+		const u32 *opt = (const u32 *)(tcph + 1);
+
+		if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
+		    *opt != htonl(TSTAMP_WORD) || !opt[2])
+			return 0;
+	}
+	return 1;
+}
+
+static int t3_get_lro_header(void **eh,  void **iph, void **tcph,
+			     u64 *hdr_flags, void *priv)
+{
+	const struct cpl_rx_pkt *cpl = priv;
+
+	if (!lro_frame_ok(cpl))
+		return -1;
+
+	*eh = (struct ethhdr *)(cpl + 1);
+	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
+	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
+
+	if (!lro_segment_ok(*tcph))
+		return -1;
+
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+	return 0;
+}
+
+static int t3_get_skb_header(struct sk_buff *skb,
+			     void **iph, void **tcph, u64 *hdr_flags,
+			     void *priv)
+{
+	void *eh;
+
+	return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
+}
+
+static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
+			      void **iph, void **tcph, u64 *hdr_flags,
+			      void *priv)
+{
+	return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
+}
+
+/**
+ *	lro_add_page - add a page chunk to an LRO session
+ *	@adap: the adapter
+ *	@qs: the associated queue set
+ *	@fl: the free list containing the page chunk to add
+ *	@len: packet length
+ *	@complete: Indicates the last fragment of a frame
+ *
+ *	Add a received packet contained in a page chunk to an existing LRO
+ *	session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+			 struct sge_fl *fl, int len, int complete)
+{
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+	struct cpl_rx_pkt *cpl;
+	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
+	int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+	int offset = 0;
+
+	if (!nr_frags) {
+		offset = 2 + sizeof(struct cpl_rx_pkt);
+		qs->lro_va = cpl = sd->pg_chunk.va + 2;
+	}
+
+	fl->credits--;
+
+	len -= offset;
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+
+	rx_frag += nr_frags;
+	rx_frag->page = sd->pg_chunk.page;
+	rx_frag->page_offset = sd->pg_chunk.offset + offset;
+	rx_frag->size = len;
+	frag_len += len;
+	qs->lro_nfrags++;
+	qs->lro_frag_len = frag_len;
+
+	if (!complete)
+		return;
+
+	qs->lro_nfrags = qs->lro_frag_len = 0;
+	cpl = qs->lro_va;
+
+	if (unlikely(cpl->vlan_valid)) {
+		struct net_device *dev = qs->netdev;
+		struct port_info *pi = netdev_priv(dev);
+		struct vlan_group *grp = pi->vlan_grp;
+
+		if (likely(grp != NULL)) {
+			lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
+						       qs->lro_frag_tbl,
+						       frag_len, frag_len,
+						       grp, ntohs(cpl->vlan),
+						       cpl, 0);
+			return;
+		}
+	}
+	lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
+			  frag_len, frag_len, cpl, 0);
+}
+
+/**
+ *	init_lro_mgr - initialize a LRO manager object
+ *	@lro_mgr: the LRO manager object
+ */
+static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
+{
+	lro_mgr->dev = qs->netdev;
+	lro_mgr->features = LRO_F_NAPI;
+	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	lro_mgr->max_desc = T3_MAX_LRO_SES;
+	lro_mgr->lro_arr = qs->lro_desc;
+	lro_mgr->get_frag_header = t3_get_frag_header;
+	lro_mgr->get_skb_header = t3_get_skb_header;
+	lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
+	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+		lro_mgr->max_aggr = MAX_SKB_FRAGS;
+}
+
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response
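lro_segment_ok() above admits only pure ACK segments whose sole option is one aligned timestamp, since anything else would change the merged header. A userspace rendering of the same test on a simplified header; the 0x10 flag value and the 12-byte option length mirror TCP's ACK bit and TCPOLEN_TSTAMP_ALIGNED, while the struct layout is an illustration, not the real wire format:

    /*
     * Sketch of the lro_segment_ok() eligibility test.
     */
    #include <stdio.h>

    struct tcp_seg {
        unsigned int flags;     /* TCP flag bits, ACK = 0x10 */
        int optlen;             /* bytes of TCP options */
        int opts_are_tstamp;    /* options are one aligned timestamp? */
    };

    static int segment_ok(const struct tcp_seg *s)
    {
        if (s->flags != 0x10)   /* anything besides plain ACK disqualifies */
            return 0;
        if (s->optlen && (s->optlen != 12 || !s->opts_are_tstamp))
            return 0;           /* 12 = TCPOLEN_TSTAMP_ALIGNED */
        return 1;
    }

    int main(void)
    {
        struct tcp_seg plain_ack = { 0x10, 0, 0 };
        struct tcp_seg ack_tstamp = { 0x10, 12, 1 };
        struct tcp_seg ack_psh = { 0x18, 0, 0 };

        printf("ACK:        %d\n", segment_ok(&plain_ack));     /* 1 */
        printf("ACK+tstamp: %d\n", segment_ok(&ack_tstamp));    /* 1 */
        printf("ACK+PSH:    %d\n", segment_ok(&ack_psh));       /* 0 */
        return 0;
    }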
@@ -1947,6 +2171,12 @@ static inline int is_new_response(const struct rsp_desc *r,
 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
 }
 
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+	q->pg_skb = NULL;
+	q->rx_recycle_buf = 0;
+}
+
 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
@@ -1984,10 +2214,11 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 	q->next_holdoff = q->holdoff_tmr;
 
 	while (likely(budget_left && is_new_response(r, q))) {
-		int eth, ethpad = 2;
+		int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
 		struct sk_buff *skb = NULL;
 		u32 len, flags = ntohl(r->flags);
-		__be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
+		__be32 rss_hi = *(const __be32 *)r,
+		       rss_lo = r->rss_hdr.rss_hash_val;
 
 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
 
@@ -2015,6 +2246,9 @@ no_mem:
 		} else if ((len = ntohl(r->len_cq)) != 0) {
 			struct sge_fl *fl;
 
+			if (eth)
+				lro = qs->lro_enabled && is_eth_tcp(rss_hi);
+
 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
 			if (fl->use_pages) {
 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
@@ -2024,9 +2258,18 @@ no_mem:
 				prefetch(addr + L1_CACHE_BYTES);
 #endif
 				__refill_fl(adap, fl);
+				if (lro > 0) {
+					lro_add_page(adap, qs, fl,
+						     G_RSPD_LEN(len),
+						     flags & F_RSPD_EOP);
+					goto next_fl;
+				}
 
-				skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
-						    eth ? SGE_RX_DROP_THRES : 0);
+				skb = get_packet_pg(adap, fl, q,
+						    G_RSPD_LEN(len),
+						    eth ?
+						    SGE_RX_DROP_THRES : 0);
+				q->pg_skb = skb;
 			} else
 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
 						 eth ? SGE_RX_DROP_THRES : 0);
@@ -2036,7 +2279,7 @@ no_mem:
 				q->rx_drops++;
 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
 				__skb_pull(skb, 2);
-
+next_fl:
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
 		} else
@@ -2060,9 +2303,13 @@ no_mem:
 			q->credits = 0;
 		}
 
-		if (likely(skb != NULL)) {
+		packet_complete = flags &
+				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+				   F_RSPD_ASYNC_NOTIF);
+
+		if (skb != NULL && packet_complete) {
 			if (eth)
-				rx_eth(adap, q, skb, ethpad);
+				rx_eth(adap, q, skb, ethpad, lro);
 			else {
 				q->offload_pkts++;
 				/* Preserve the RSS info in csum & priority */
@@ -2072,11 +2319,19 @@ no_mem:
 							offload_skbs,
 							ngathered);
 			}
+
+			if (flags & F_RSPD_EOP)
+				clear_rspq_bufstate(q);
 		}
 		--budget_left;
 	}
 
 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+	lro_flush_all(&qs->lro_mgr);
+	qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
+	qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
+	qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
+
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
 
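process_responses() above now defers delivery until `packet_complete` is true, i.e. the descriptor carries EOP, immediate data, or an async notification; mid-frame buffers only extend q->pg_skb's frag list. A sketch of the gating test with illustrative flag values, not the hardware encoding:

    /*
     * Sketch of the packet_complete gating added to process_responses().
     * The flag values are made up for the demo.
     */
    #include <stdio.h>

    #define F_RSPD_EOP             0x1
    #define F_RSPD_IMM_DATA_VALID  0x2
    #define F_RSPD_ASYNC_NOTIF     0x4

    static int packet_complete(unsigned int flags)
    {
        return flags & (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
                        F_RSPD_ASYNC_NOTIF);
    }

    int main(void)
    {
        /* middle-of-frame buffer: keep accumulating, don't deliver */
        printf("mid-frame: %s\n", packet_complete(0) ? "deliver" : "hold");
        /* EOP buffer: deliver the assembled frame */
        printf("EOP:       %s\n",
               packet_complete(F_RSPD_EOP) ? "deliver" : "hold");
        return 0;
    }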
@@ -2618,8 +2873,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 		      int irq_vec_idx, const struct qset_params *p,
 		      int ntxq, struct net_device *dev)
 {
-	int i, ret = -ENOMEM;
+	int i, avail, ret = -ENOMEM;
 	struct sge_qset *q = &adapter->sge.qs[id];
+	struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
 	init_qset_cntxt(q, id);
 	init_timer(&q->tx_reclaim_timer);
@@ -2687,11 +2943,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 #else
 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
 #endif
-	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+#if FL1_PG_CHUNK_SIZE > 0
+	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
 	q->fl[1].buf_size = is_offload(adapter) ?
 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+	q->fl[0].order = FL0_PG_ORDER;
+	q->fl[1].order = FL1_PG_ORDER;
+
+	q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
+				  sizeof(struct skb_frag_struct),
+				  GFP_KERNEL);
+	q->lro_nfrags = q->lro_frag_len = 0;
 
 	spin_lock_irq(&adapter->sge.reg_lock);
 
 	/* FL threshold comparison uses < */
@@ -2742,8 +3010,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->netdev = dev;
 	t3_update_qset_coalesce(q, p);
 
-	refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
-	refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+	init_lro_mgr(q, lro_mgr);
+
+	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+			  GFP_KERNEL | __GFP_COMP);
+	if (!avail) {
+		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+		goto err;
+	}
+	if (avail < q->fl[0].size)
+		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+			avail);
+
+	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+			  GFP_KERNEL | __GFP_COMP);
+	if (avail < q->fl[1].size)
+		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+			avail);
 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
 
 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
@@ -2752,9 +3035,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 
 	return 0;
- err_unlock:
+err_unlock:
 	spin_unlock_irq(&adapter->sge.reg_lock);
- err:
+err:
 	t3_free_qset(adapter, q);
 	return ret;
 }
@@ -2876,7 +3159,7 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
 		q->coalesce_usecs = 5;
 		q->rspq_size = 1024;
 		q->fl_size = 1024;
-		q->jumbo_size = 512;
+		q->jumbo_size = 512;
 		q->txq_size[TXQ_ETH] = 1024;
 		q->txq_size[TXQ_OFLD] = 1024;
 		q->txq_size[TXQ_CTRL] = 256;
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
index b7a1a310dfd..917970ed24a 100644
--- a/drivers/net/cxgb3/t3_cpl.h
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -174,6 +174,13 @@ enum {		/* TCP congestion control algorithms */
 	CONG_ALG_HIGHSPEED
 };
 
+enum {			/* RSS hash type */
+	RSS_HASH_NONE = 0,
+	RSS_HASH_2_TUPLE = 1,
+	RSS_HASH_4_TUPLE = 2,
+	RSS_HASH_TCPV6 = 3
+};
+
 union opcode_tid {
 	__be32 opcode_tid;
 	__u8 opcode;
@@ -184,6 +191,13 @@ union opcode_tid {
 #define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
 #define G_TID(x)    ((x) & 0xFFFFFF)
 
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
 /* tid is assumed to be 24-bits */
 #define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
 
@@ -768,6 +782,12 @@ struct tx_data_wr {
 	__be32 param;
 };
 
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES 21
+#define M_TX_ACK_PAGES 0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
 /* tx_data_wr.param fields */
 #define S_TX_PORT    0
 #define M_TX_PORT    0x7
@@ -1441,4 +1461,35 @@ struct cpl_rdma_terminate {
 #define M_TERM_TID 0xFFFFF
 #define V_TERM_TID(x) ((x) << S_TERM_TID)
 #define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+
+/* ULP_TX opcodes */
+enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
+
+#define S_ULPTX_CMD	28
+#define M_ULPTX_CMD	0xF
+#define V_ULPTX_CMD(x)	((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_NFLITS	0
+#define M_ULPTX_NFLITS	0xFF
+#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
+
+struct ulp_mem_io {
+	WR_HDR;
+	__be32 cmd_lock_addr;
+	__be32 len;
+};
+
+/* ulp_mem_io.cmd_lock_addr fields */
+#define S_ULP_MEMIO_ADDR	0
+#define M_ULP_MEMIO_ADDR	0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x)	((x) << S_ULP_MEMIO_ADDR)
+#define S_ULP_MEMIO_LOCK	27
+#define V_ULP_MEMIO_LOCK(x)	((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK	V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.len fields */
+#define S_ULP_MEMIO_DATA_LEN	28
+#define M_ULP_MEMIO_DATA_LEN	0xF
+#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+
 #endif	/* T3_CPL_H */
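t3_cpl.h above follows the driver's S_/M_/V_/G_ field-macro convention; G_HASHTYPE() extracts the 2-bit RSS hash type from bits 23:22 of the RSS header word, which is_eth_tcp() in sge.c compares against RSS_HASH_4_TUPLE to decide LRO eligibility. A standalone demo with a fabricated RSS word:

    /*
     * Demo of the G_HASHTYPE() extraction; the sample word is fabricated.
     */
    #include <stdio.h>

    #define S_HASHTYPE 22
    #define M_HASHTYPE 0x3
    #define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)

    enum { RSS_HASH_NONE, RSS_HASH_2_TUPLE, RSS_HASH_4_TUPLE, RSS_HASH_TCPV6 };

    int main(void)
    {
        unsigned int rss = 2u << S_HASHTYPE;    /* fabricated 4-tuple hash */

        printf("hash type %u, 4-tuple? %s\n", G_HASHTYPE(rss),
               G_HASHTYPE(rss) == RSS_HASH_4_TUPLE ? "yes" : "no");
        return 0;
    }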
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index a18c8a14042..0a21cfbd2b2 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -45,7 +45,8 @@ struct cxgb3_client;
 
 enum t3ctype {
 	T3A = 0,
-	T3B
+	T3B,
+	T3C,
 };
 
 struct t3cdev {
@@ -63,6 +64,7 @@ struct t3cdev {
 	void *l3opt;			/* optional layer 3 data */
 	void *l4opt;			/* optional layer 4 data */
 	void *ulp;			/* ulp stuff */
+	void *ulp_iscsi;		/* ulp iscsi */
 };
 
 #endif				/* _T3CDEV_H_ */