Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--	drivers/net/cxgb3/sge.c	110
1 file changed, 45 insertions, 65 deletions
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab..87919419b70 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -351,7 +351,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
if (q->use_pages) {
- put_page(d->pg_chunk.page);
+ if (d->pg_chunk.page)
+ put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
kfree_skb(d->skb);
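The new NULL test is defensive: free_rx_bufs() walks every buffer posted to the free list, and in page-backed mode an entry can legitimately hold no page (for instance when a refill failed partway through the ring), in which case put_page() would dereference a NULL pointer. A minimal sketch of the resulting pattern, assuming kernel build context; drain_page_ring() and struct pg_slot are hypothetical stand-ins for the real ring walk:

#include <linux/mm.h>

struct pg_slot {
	struct page *page;		/* NULL if the slot was never filled */
};

static void drain_page_ring(struct pg_slot *ring, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (ring[i].page)	/* the guard this patch adds */
			put_page(ring[i].page);
		ring[i].page = NULL;	/* avoid a double put on re-entry */
	}
}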
@@ -583,7 +584,7 @@ static void t3_reset_qset(struct sge_qset *q)
memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
q->txq_stopped = 0;
- memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+ q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
kfree(q->lro_frag_tbl);
q->lro_nfrags = q->lro_frag_len = 0;
}
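Two things are going on in this one-line change. A memset() wipes the timer's internal state (its base pointer and list linkage), so handing the zeroed timer to the timer API later is unsafe; clearing only .function leaves the timer valid. At the same time, a NULL .function becomes a cheap "never initialised, or already reset" flag, which is exactly what the t3_stop_sge_timers() helper added later in this patch tests before calling del_timer_sync().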
@@ -603,9 +604,6 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
int i;
struct pci_dev *pdev = adapter->pdev;
- if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
-
for (i = 0; i < SGE_RXQ_PER_SET; ++i)
if (q->fl[i].desc) {
spin_lock_irq(&adapter->sge.reg_lock);
@@ -1704,16 +1702,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
*/
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
- skb->next = skb->prev = NULL;
- if (q->rx_tail)
- q->rx_tail->next = skb;
- else {
+ int was_empty = skb_queue_empty(&q->rx_queue);
+
+ __skb_queue_tail(&q->rx_queue, skb);
+
+ if (was_empty) {
struct sge_qset *qs = rspq_to_qset(q);
napi_schedule(&qs->napi);
- q->rx_head = skb;
}
- q->rx_tail = skb;
}
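offload_enqueue() deliberately uses the unlocked __skb_queue_tail(): the double-underscore skb-queue helpers manipulate the list without taking its built-in spinlock, on the premise that the caller already serializes access (here the interrupt-path callers hold q->lock, the same lock ofld_poll() takes around its splice). A small sketch of the two flavors, assuming kernel build context; the function names are hypothetical:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Locked flavor: skb_queue_tail() takes list->lock internally. */
static void produce_locked(struct sk_buff_head *list, struct sk_buff *skb)
{
	skb_queue_tail(list, skb);
}

/* Unlocked flavor: the caller must already hold a lock covering every
 * access to @list, as offload_enqueue()'s callers do with q->lock. */
static void produce_prelocked(spinlock_t *lock, struct sk_buff_head *list,
			      struct sk_buff *skb)
{
	spin_lock(lock);
	__skb_queue_tail(list, skb);
	spin_unlock(lock);
}

Note that skb_queue_empty() is sampled before the enqueue so NAPI is scheduled exactly once per burst: only the skb that makes the queue non-empty triggers it.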
/**
@@ -1754,26 +1751,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
int work_done = 0;
while (work_done < budget) {
- struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
int ngathered;
spin_lock_irq(&q->lock);
- head = q->rx_head;
- if (!head) {
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
napi_complete(napi);
spin_unlock_irq(&q->lock);
return work_done;
}
-
- tail = q->rx_tail;
- q->rx_head = q->rx_tail = NULL;
spin_unlock_irq(&q->lock);
- for (ngathered = 0; work_done < budget && head; work_done++) {
- prefetch(head->data);
- skbs[ngathered] = head;
- head = head->next;
- skbs[ngathered]->next = NULL;
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
adapter->tdev.recv(&adapter->tdev, skbs,
		   ngathered);
@@ -1781,12 +1781,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
ngathered = 0;
}
}
- if (head) { /* splice remaining packets back onto Rx queue */
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
spin_lock_irq(&q->lock);
- tail->next = q->rx_head;
- if (!q->rx_head)
- q->rx_tail = tail;
- q->rx_head = head;
+ skb_queue_splice(&queue, &q->rx_queue);
spin_unlock_irq(&q->lock);
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
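The consumer side is the classic splice-drain pattern: take the lock once, move the entire backlog to a private on-stack list with skb_queue_splice_init(), drop the lock, and process without further contention; whatever survives the budget is spliced back at the head of the shared queue, so leftovers still sit ahead of any packets offload_enqueue() added in the meantime. A condensed sketch of the pattern, assuming kernel build context; drain_budgeted() and its consumer stand-in are hypothetical:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int drain_budgeted(spinlock_t *lock, struct sk_buff_head *shared,
			  int budget)
{
	struct sk_buff_head local;
	struct sk_buff *skb, *tmp;
	int done = 0;

	__skb_queue_head_init(&local);	/* on-stack list, no lock setup */

	spin_lock_irq(lock);
	skb_queue_splice_init(shared, &local);	/* grab the whole backlog */
	spin_unlock_irq(lock);

	skb_queue_walk_safe(&local, skb, tmp) {
		if (done >= budget)
			break;
		__skb_unlink(skb, &local);
		kfree_skb(skb);			/* stand-in for the consumer */
		done++;
	}

	if (!skb_queue_empty(&local)) {		/* out of budget */
		spin_lock_irq(lock);
		skb_queue_splice(&local, shared);	/* back at the head */
		spin_unlock_irq(lock);
	}
	return done;
}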
@@ -1937,38 +1935,6 @@ static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
}
-#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
- TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
- TCP_FLAG_SYN | TCP_FLAG_FIN)
-#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
-
-/**
- * lro_segment_ok - check if a TCP segment is eligible for LRO
- * @tcph: the TCP header of the packet
- *
- * Returns true if a TCP packet is eligible for LRO. This requires that
- * the packet have only the ACK flag set and no TCP options besides
- * time stamps.
- */
-static inline int lro_segment_ok(const struct tcphdr *tcph)
-{
- int optlen;
-
- if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
- return 0;
-
- optlen = (tcph->doff << 2) - sizeof(*tcph);
- if (optlen) {
- const u32 *opt = (const u32 *)(tcph + 1);
-
- if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
- *opt != htonl(TSTAMP_WORD) || !opt[2])
- return 0;
- }
- return 1;
-}
-
static int t3_get_lro_header(void **eh, void **iph, void **tcph,
u64 *hdr_flags, void *priv)
{
@@ -1981,9 +1947,6 @@ static int t3_get_lro_header(void **eh, void **iph, void **tcph,
*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
- if (!lro_segment_ok(*tcph))
- return -1;
-
*hdr_flags = LRO_IPV4 | LRO_TCP;
return 0;
}
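For reference, the removed TSTAMP_WORD constant packs the canonical timestamp-option prefix into a single word: with TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8 and TCPOLEN_TIMESTAMP = 10, (1 << 24) | (1 << 16) | (8 << 8) | 10 = 0x0101080a, i.e. NOP, NOP, kind 8, length 10, so one 32-bit compare recognized the only option layout LRO accepts. The driver-local screen appears redundant: the generic inet_lro layer (lro_tcp_ip_check() in net/ipv4/inet_lro.c) applies the same ACK-only-flags and timestamp-only-options tests, which is presumably why t3_get_lro_header() no longer pre-filters.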
@@ -2878,9 +2841,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
struct net_lro_mgr *lro_mgr = &q->lro_mgr;
init_qset_cntxt(q, id);
- init_timer(&q->tx_reclaim_timer);
- q->tx_reclaim_timer.data = (unsigned long)q;
- q->tx_reclaim_timer.function = sge_timer_cb;
+ setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
sizeof(struct rx_desc),
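setup_timer() is shorthand for exactly the three lines it replaces (this is the pre-timer_setup() era of the kernel timer API). A minimal equivalence sketch, assuming kernel build context; example_cb() is a hypothetical callback:

#include <linux/timer.h>

static void example_cb(unsigned long data)
{
	/* hypothetical callback body */
}

static void init_two_ways(struct timer_list *t, unsigned long data)
{
	/* open-coded form the patch removes ... */
	init_timer(t);
	t->data = data;
	t->function = example_cb;

	/* ... and its one-line equivalent */
	setup_timer(t, example_cb, data);
}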
@@ -2934,6 +2895,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->rspq.gen = 1;
q->rspq.size = p->rspq_size;
spin_lock_init(&q->rspq.lock);
+ skb_queue_head_init(&q->rspq.rx_queue);
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
@@ -3043,6 +3005,24 @@ err:
}
/**
+ * t3_stop_sge_timers - stop SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer callback
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ del_timer_sync(&q->tx_reclaim_timer);
+ }
+}
+
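With the del_timer_sync() call moved out of t3_free_qset(), callers are expected to stop every qset's timer once per adapter before releasing queue memory; the .function test pairs with the NULL assignment in t3_reset_qset() above, so qsets that were reset or never brought up are skipped safely. A sketch of the teardown order this implies, with a hypothetical caller:

static void example_teardown(struct adapter *adapter)
{
	t3_stop_sge_timers(adapter);	/* no reclaim timer can fire past here */
	t3_free_sge_resources(adapter);	/* now safe to free the rings */
}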
+/**
* t3_free_sge_resources - free SGE resources
* @adap: the adapter
*