author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:43:13 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:43:13 -0700 |
commit | 951cc93a7493a81a47e20231441bc6cf17c98a37 (patch) | |
tree | f53934f0f225e0215a85c8c59af4c6513e89e3f1 /drivers/net/wireless/b43/dma.c | |
parent | a7e1aabb28e8154ce987b622fd78d80a1ca39361 (diff) | |
parent | 415b3334a21aa67806c52d1acf4e72e14f7f402f (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1287 commits)
icmp: Fix regression in nexthop resolution during replies.
net: Fix ppc64 BPF JIT dependencies.
acenic: include NET_SKB_PAD headroom to incoming skbs
ixgbe: convert to ndo_fix_features
ixgbe: only enable WoL for magic packet by default
ixgbe: remove ifdef check for non-existent define
ixgbe: Pass staterr instead of re-reading status and error bits from descriptor
ixgbe: Move interrupt related values out of ring and into q_vector
ixgbe: add structure for containing RX/TX rings to q_vector
ixgbe: inline the ixgbe_maybe_stop_tx function
ixgbe: Update ATR to use recorded TX queues instead of CPU for routing
igb: Fix for DH89xxCC near end loopback test
e1000: always call e1000_check_for_link() on e1000_ce4100 MACs.
netxen: add fw version compatibility check
be2net: request native mode each time the card is reset
ipv4: Constrain UFO fragment sizes to multiples of 8 bytes
virtio_net: Fix panic in virtnet_remove
ipv6: make fragment identifications less predictable
ipv6: unshare inetpeers
can: make function can_get_bittiming static
...
Diffstat (limited to 'drivers/net/wireless/b43/dma.c')
-rw-r--r-- | drivers/net/wireless/b43/dma.c | 48 |
1 files changed, 28 insertions, 20 deletions
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 47d44bcff37..ce572aebeff 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,10 +333,10 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_FROM_DEVICE);
 	}
 
@@ -348,10 +348,10 @@ static inline
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		dma_unmap_single(ring->dev->sdev->dma_dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_TO_DEVICE);
 	} else {
-		dma_unmap_single(ring->dev->sdev->dma_dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_FROM_DEVICE);
 	}
 }
@@ -361,7 +361,7 @@ static inline
 			       dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,7 +370,7 @@ static inline
 				  dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	dma_sync_single_for_device(ring->dev->sdev->dma_dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				   addr, len, DMA_FROM_DEVICE);
 }
 
@@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 					    B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
@@ -415,7 +415,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
 			  ring->descbase, ring->dmabase);
 }
 
@@ -523,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -757,14 +757,14 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
 
 static void free_all_descbuffers(struct b43_dmaring *ring)
 {
-	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	int i;
 
 	if (!ring->used_slots)
 		return;
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->ops->idx2desc(ring, i, &meta);
+		/* get meta - ignore returned value */
+		ring->ops->idx2desc(ring, i, &meta);
 
 		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
 			B43_WARN_ON(!ring->tx);
@@ -869,7 +869,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = dma_map_single(dev->sdev->dma_dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					  ring->txhdr_cache,
 					  b43_txhdr_size(dev),
 					  DMA_TO_DEVICE);
@@ -884,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = dma_map_single(dev->sdev->dma_dev,
+			dma_test = dma_map_single(dev->dev->dma_dev,
 						  ring->txhdr_cache,
 						  b43_txhdr_size(dev),
 						  DMA_TO_DEVICE);
@@ -898,7 +898,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		dma_unmap_single(dev->sdev->dma_dev,
+		dma_unmap_single(dev->dev->dma_dev,
 				 dma_test, b43_txhdr_size(dev),
 				 DMA_TO_DEVICE);
 	}
@@ -1013,9 +1013,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = dma_set_mask(dev->sdev->dma_dev, mask);
+		err = dma_set_mask(dev->dev->dma_dev, mask);
 		if (!err) {
-			err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
 			if (!err)
 				break;
 		}
@@ -1055,7 +1055,14 @@ int b43_dma_init(struct b43_wldev *dev)
 	err = b43_dma_set_mask(dev, dmamask);
 	if (err)
 		return err;
-	dma->translation = ssb_dma_translation(dev->sdev);
+
+	switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_SSB
+	case B43_BUS_SSB:
+		dma->translation = ssb_dma_translation(dev->dev->sdev);
+		break;
+#endif
+	}
 
 	err = -ENOMEM;
 	/* setup TX DMA channels. */
@@ -1085,7 +1092,7 @@ int b43_dma_init(struct b43_wldev *dev)
 		goto err_destroy_mcast;
 
 	/* No support for the TX status DMA ring. */
-	B43_WARN_ON(dev->sdev->id.revision < 5);
+	B43_WARN_ON(dev->dev->core_rev < 5);
 
 	b43dbg(dev->wl, "%u-bit DMA initialized\n",
 	       (unsigned int)type);
@@ -1388,7 +1395,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 {
 	const struct b43_dma_ops *ops;
 	struct b43_dmaring *ring;
-	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	int slot, firstused;
 	bool frame_succeed;
@@ -1416,7 +1422,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	ops = ring->ops;
 	while (1) {
 		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
-		desc = ops->idx2desc(ring, slot, &meta);
+		/* get meta - ignore returned value */
+		ops->idx2desc(ring, slot, &meta);
 
 		if (b43_dma_ptr_is_poisoned(meta->skb)) {
 			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
@@ -1600,6 +1607,7 @@ void b43_dma_rx(struct b43_dmaring *ring)
 		dma_rx(ring, &slot);
 		update_max_used_slots(ring, ++used_slots);
 	}
+	wmb();
 	ops->set_current_rxslot(ring, slot);
 	ring->current_slot = slot;
 }
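Taken together, the hunks above replace the SSB-specific `dev->sdev` handle with the bus-agnostic `dev->dev` handle and make `b43_dma_init()` pick the DMA address translation per bus type. The standalone C sketch below only illustrates that dispatch pattern; every type and name in it (`fake_bus_dev`, `pick_dma_translation`, the translation value) is a made-up stub for demonstration, not the kernel's structures or API.

```c
/*
 * Standalone illustration of the bus-dispatch pattern used in the
 * b43_dma_init() hunk above. All types and names here are stubs for
 * demonstration only; they are not the real kernel structures.
 */
#include <stdint.h>
#include <stdio.h>

enum fake_bus_type { FAKE_BUS_SSB, FAKE_BUS_BCMA };

struct fake_bus_dev {
	enum fake_bus_type bus_type;
	uint32_t ssb_translation;	/* stand-in for ssb_dma_translation() */
};

/* Pick the DMA address translation bits based on the host bus,
 * mirroring the switch (dev->dev->bus_type) added in the diff. */
static uint32_t pick_dma_translation(const struct fake_bus_dev *dev)
{
	switch (dev->bus_type) {
	case FAKE_BUS_SSB:
		return dev->ssb_translation;
	default:
		/* The diff only handles the SSB case under CONFIG_B43_SSB;
		 * other buses would get their own case here. */
		return 0;
	}
}

int main(void)
{
	struct fake_bus_dev dev = {
		.bus_type = FAKE_BUS_SSB,
		.ssb_translation = 0x40000000,	/* arbitrary example value */
	};

	printf("translation bits: 0x%08x\n",
	       (unsigned int)pick_dma_translation(&dev));
	return 0;
}
```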