| author | Santiago Leon <santil@us.ibm.com> | 2008-08-20 13:09:19 -0600 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@redhat.com> | 2008-08-27 05:36:57 -0400 |
| commit | 45e15bb734e4e559ab32f26196e44d5cf1417f10 | |
| tree | e71322bc295822ad97af55b0298668eab093cfec | |
| parent | c213f286f2cf6590f83f541f66a625ee8d20c6f4 | |
ibmveth: fix bad UDP checksums
This patch fixes an ibmveth bug where bad UDP checksums are transmitted
when checksum offloading is enabled.
The hypervisor performs checksum offloading only for TCP packets, so ibmveth calls
skb_checksum_help() for any other protocol. The bug occurs because
the packet is modified after the DMA map, which would require a memory
barrier before making the hypervisor call. Reordering the code so that the
DMA map happens after skb_checksum_help() has the additional advantage of
fixing a DMA map leak if skb_checksum_help() were to fail.
Signed-off-by: Santiago Leon <santil@us.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
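To make the ordering concrete, here is a minimal sketch of the rule the patch enforces in the transmit path: every CPU write to the packet (the software checksum fixup for non-TCP protocols) must be finished before dma_map_single() hands the buffer to the device. The helper name checksum_then_map() and its surrounding context are hypothetical illustrations, not ibmveth's actual code.

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/*
 * Hypothetical helper (not ibmveth's actual code) showing the ordering
 * the patch establishes: finish every CPU write to the packet before
 * dma_map_single() passes ownership of the buffer to the device.
 */
static int checksum_then_map(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *mapping)
{
	/*
	 * The hypervisor offloads checksums only for TCP, so compute the
	 * checksum in software for every other protocol.  This writes into
	 * skb->data and therefore must happen before the DMA mapping.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP &&
	    skb_checksum_help(skb))
		return -EIO;	/* nothing mapped yet, so nothing leaks */

	/*
	 * Map only once the packet contents are final.  Any CPU write after
	 * this point would need an explicit barrier (e.g. wmb()) before the
	 * buffer is handed to the hypervisor.
	 */
	*mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;

	return 0;
}
```

In the driver itself, the mapping-failure path falls back to a pre-mapped bounce buffer; the wmb() added in that branch orders any preceding CPU writes before the descriptor is handed to the hypervisor, matching the barrier the commit message refers to.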
-rw-r--r-- | drivers/net/ibmveth.c | 5 |
1 file changed, 3 insertions, 2 deletions
```diff
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a03fe1fb61c..c2d57f83608 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
-				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			ibmveth_error_printk("tx: unable to map xmit buffer\n");
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
 		used_bounce = 1;
+		wmb();
 	} else
 		desc.fields.address = data_dma_addr;
 
```