author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-01-21 11:37:57 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-01-21 11:37:57 -0800
commit     ad2e6329666650d9cafcae9ef53fbe09ea759ae2 (patch)
tree       b23eb258fa609be246bb44cd9b33ff86d8142ca7 /drivers/net/ethernet/nvidia/forcedeth.c
parent     1ee0a224bc9aad1de496c795f96bc6ba2c394811 (diff)
parent     b810075002c9f25a6da83cecda39d789000a04a9 (diff)
Merge tag 'fixes-for-v3.8-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus
Felipe writes:

usb: fixes for v3.8-rc5

Finally we have a build fix for the fsl-mxc-udc UDC driver.

We also have a fix for the ep0 maxburst setting on DWC3, which could confuse the hardware if we told it that endpoint had far too many streams when it _has_ to be only one.

cppi_dma support for MUSB got a fix when running as a module. By dropping the wrong __init annotation, the function stays available even when built as a module, after the .init.text section has been discarded.

Last, but not least, we have a fix for FunctionFS, which was causing a bug in our option parsing algorithm.
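For readers unfamiliar with the __init issue mentioned above, here is an illustrative sketch of the general problem (the function name is hypothetical, not the actual MUSB/cppi_dma symbol): __init places a function in .init.text, which is freed once initialization finishes, and for modules once the module's init routine returns, so any function that may be called later must not carry that annotation.

	/*
	 * Sketch only: example_dma_setup() is a made-up name used to show
	 * why a function needed after init must not be marked __init.
	 */
	#include <linux/init.h>

	/* Before: placed in .init.text, freed after init -- calling it
	 * later (e.g. from a probe path in a module) would jump into
	 * freed memory. */
	/* static int __init example_dma_setup(void) { ... } */

	/* After: lives in regular .text for the lifetime of the module. */
	static int example_dma_setup(void)
	{
		/* set up DMA resources here */
		return 0;
	}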
Diffstat (limited to 'drivers/net/ethernet/nvidia/forcedeth.c')
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 653487dc7b5..87fa5919c45 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -5003,6 +5033,11 @@ static int nv_loopback_test(struct net_device *dev)
test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
skb_tailroom(tx_skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ test_dma_addr)) {
+ dev_kfree_skb_any(tx_skb);
+ goto out;
+ }
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
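All five hunks apply the same pattern: after pci_map_single(), check pci_dma_mapping_error() and, on failure, free the skb and count a dropped packet instead of handing an invalid DMA address to the hardware (the RX paths jump to the packet_dropped label, the TX paths return NETDEV_TX_OK after accounting the drop). Below is a minimal, self-contained sketch of the TX-side variant; struct foo_priv and foo_start_xmit() are hypothetical names, not forcedeth.c code, and the simplified stat omits the u64_stats syncp used by the real driver. It uses the legacy PCI DMA API of the v3.8 era (pci_map_single/pci_dma_mapping_error), which later kernels replaced with dma_map_single/dma_mapping_error.

	/* Sketch: DMA-mapping-error handling in a hypothetical xmit path. */
	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	struct foo_priv {
		struct pci_dev *pci_dev;
		unsigned long   tx_dropped;	/* simplified drop counter */
	};

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *np = netdev_priv(dev);
		dma_addr_t dma;

		dma = pci_map_single(np->pci_dev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(np->pci_dev, dma)) {
			/* Mapping failed: drop the packet rather than queue a
			 * bogus address to the NIC, and return NETDEV_TX_OK so
			 * the stack does not retry the same skb. */
			kfree_skb(skb);
			np->tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* ... fill the TX descriptor with 'dma' and kick the hardware ... */

		return NETDEV_TX_OK;
	}

Returning NETDEV_TX_OK here mirrors the patch's choice: a mapping failure is treated as a dropped packet, not a queue-full condition, so the stack frees nothing further and does not stop the TX queue.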