path: root/drivers/net/ixgb/ixgb_main.c
author    Jesse Brandeburg <jesse.brandeburg@intel.com>    2007-01-06 09:51:23 -0800
committer Auke Kok <auke-jan.h.kok@intel.com>    2007-01-06 09:51:23 -0800
commit    5d9278537502d2e404e85485d1b905814fe728c0 (patch)
tree      aa6f4d4493a8f67f08bd36f57d6f600a468c5655 /drivers/net/ixgb/ixgb_main.c
parent    81f4e6c190a0fa016fd7eecaf76a5f95d121afc2 (diff)
ixgb: Fix early TSO completion
This fix was already merged in commit 96f9c2e277768099479fbed7c3b69c294b1fadef but reverted in commit 989316ddfeafd0e8fb51a4d811383769ad62637a. After stress testing we found that the fix does not add new regressions and works around a TX hang spotted by several users.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
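As context for the hunks below: the workaround splits off the last 4 bytes of a single-buffer TSO frame into their own sentinel descriptor, so the hardware cannot write back the real data descriptor early. The following is a minimal standalone sketch of that size calculation, assuming illustrative constants and helper names rather than the driver's actual ones.

/* Standalone sketch of the sentinel-descriptor size split; MAX_DATA_PER_TXD
 * and tso_chunk_size() are illustrative, not the driver's identifiers. */
#include <stdio.h>

#define MAX_DATA_PER_TXD 4096   /* stand-in for IXGB_MAX_DATA_PER_TXD */

/* How many bytes of the remaining length go into the current descriptor. */
static unsigned int tso_chunk_size(unsigned int len, int is_tso, int has_frags)
{
	unsigned int size = len < MAX_DATA_PER_TXD ? len : MAX_DATA_PER_TXD;

	/* Same condition as the patch: TSO frame, no page fragments, this
	 * chunk would consume the rest of the data, and it is large enough
	 * to split. Hold back 4 bytes for a trailing sentinel descriptor. */
	if (is_tso && !has_frags && size == len && size > 8)
		size -= 4;

	return size;
}

int main(void)
{
	unsigned int len = 1448;                        /* one TSO segment of data */
	unsigned int first = tso_chunk_size(len, 1, 0);

	printf("data descriptor: %u bytes, sentinel: %u bytes\n",
	       first, len - first);                     /* prints 1444 and 4 */
	return 0;
}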
Diffstat (limited to 'drivers/net/ixgb/ixgb_main.c')
-rw-r--r--    drivers/net/ixgb/ixgb_main.c    18
1 file changed, 16 insertions, 2 deletions
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c4..70ac9d4a83b 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
struct ixgb_buffer *buffer_info;
int len = skb->len;
unsigned int offset = 0, size, count = 0, i;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+
buffer_info->length = size;
WARN_ON(buffer_info->dma != 0);
buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
while(len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len
+ && size > 8))
+ size -= 4;
+
buffer_info->length = size;
buffer_info->dma =
pci_map_page(adapter->pdev,
@@ -1401,8 +1414,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
(((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+ + 1 /* one more needed for sentinel TSO workaround */
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
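The DESC_NEEDED change above budgets one extra descriptor per frame for the sentinel. A rough recomputation of that worst case is sketched below; the constants are placeholders chosen for the example, not the driver's real IXGB_* or MAX_SKB_FRAGS values.

/* Illustrative worst-case descriptor budget per skb; all constants below are
 * placeholders, not the driver's actual values. */
#include <stdio.h>

#define MAX_TXD_PWR       12
#define MAX_DATA_PER_TXD  (1 << MAX_TXD_PWR)   /* 4096 */
#define MAX_SKB_FRAGS     18                   /* placeholder */
#define EXAMPLE_PAGE_SIZE 4096

#define TXD_USE_COUNT(S) (((S) >> MAX_TXD_PWR) + \
                          (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	unsigned int needed = TXD_USE_COUNT(MAX_DATA_PER_TXD)                  /* linear data  */
	                    + MAX_SKB_FRAGS * TXD_USE_COUNT(EXAMPLE_PAGE_SIZE) /* page frags   */
	                    + 1                                                /* context desc */
	                    + 1;                                               /* TSO sentinel */

	printf("worst-case descriptors per skb: %u\n", needed); /* 1 + 18 + 1 + 1 = 21 */
	return 0;
}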