author     David S. Miller <davem@davemloft.net>  2010-06-11 11:34:06 -0700
committer  David S. Miller <davem@davemloft.net>  2010-06-11 11:34:06 -0700
commit     14599f1e341ee219abdd15f4eee5872d6f2d29f1 (patch)
tree       3875181429010e58416ab34e6c06ef42de52e756 /drivers/net/wireless/b43
parent     d8d1f30b95a635dbd610dcc5eb641aca8f4768cf (diff)
parent     832c10fd733893f86c63bde1c65b005d5a2fe346 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6

Conflicts:
	drivers/net/wireless/wl12xx/wl1271.h
	drivers/net/wireless/wl12xx/wl1271_cmd.h
Diffstat (limited to 'drivers/net/wireless/b43')
-rw-r--r--	drivers/net/wireless/b43/dma.c	69
1 file changed, 36 insertions(+), 33 deletions(-)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fa40fdfea71..10d0aaf754c 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_TO_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len, DMA_FROM_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len, DMA_FROM_DEVICE);
 	}
 
 	return dmaaddr;
@@ -348,11 +348,11 @@ static inline
 		      dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_TO_DEVICE);
 	} else {
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len, DMA_FROM_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len, DMA_FROM_DEVICE);
 	}
 }
 
@@ -361,7 +361,7 @@ static inline
 			     dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,8 +370,8 @@ static inline
 			     dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
-				       addr, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
+				   addr, len, DMA_FROM_DEVICE);
 }
 
 static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+					    B43_DMA_RINGMEMSIZE,
+					    &(ring->dmabase), flags);
 	if (!ring->descbase) {
 		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
 
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 				  dma_addr_t addr,
 				  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			goto err_kfree_meta;
 
 		/* test for ability to dma to txhdr_cache */
-		dma_test = ssb_dma_map_single(dev->dev,
-					      ring->txhdr_cache,
-					      b43_txhdr_size(dev),
-					      DMA_TO_DEVICE);
+		dma_test = dma_map_single(dev->dev->dma_dev,
+					  ring->txhdr_cache,
+					  b43_txhdr_size(dev),
+					  DMA_TO_DEVICE);
 
 		if (b43_dma_mapping_error(ring, dma_test,
 					  b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			if (!ring->txhdr_cache)
 				goto err_kfree_meta;
 
-			dma_test = ssb_dma_map_single(dev->dev,
-						      ring->txhdr_cache,
-						      b43_txhdr_size(dev),
-						      DMA_TO_DEVICE);
+			dma_test = dma_map_single(dev->dev->dma_dev,
+						  ring->txhdr_cache,
+						  b43_txhdr_size(dev),
+						  DMA_TO_DEVICE);
 
 			if (b43_dma_mapping_error(ring, dma_test,
 						  b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 			}
 		}
 
-		ssb_dma_unmap_single(dev->dev,
-				     dma_test, b43_txhdr_size(dev),
-				     DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev->dma_dev,
+				 dma_test, b43_txhdr_size(dev),
+				 DMA_TO_DEVICE);
 	}
 
 	err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
@@ -1221,14 +1224,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+						  GFP_ATOMIC | GFP_DMA);
 		if (!priv_info->bouncebuffer) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -ENOMEM;
 			goto out_unmap_hdr;
 		}
-		memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
 		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
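
The b43/dma.c changes pulled in by this merge are mechanical: each ssb_dma_* wrapper is replaced by the corresponding generic DMA API call on the SSB core's underlying struct device (dev->dev->dma_dev), dma_set_mask() is now paired with dma_set_coherent_mask(), and the TX bounce-buffer path switches from kmalloc() plus memcpy() to kmemdup(). As a rough, self-contained sketch of the map-then-bounce pattern the driver relies on, the code below uses only the generic DMA API; struct my_ring and my_map_or_bounce() are hypothetical names for illustration, not part of the b43 driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical ring context: only the struct device used for mappings. */
struct my_ring {
	struct device *dma_dev;
};

/*
 * Map buf for TX; if the streaming mapping fails (e.g. the buffer lies
 * above the device's DMA mask), duplicate it into a GFP_DMA bounce
 * buffer with kmemdup() and map that instead. On success, *addr holds
 * the bus address and *bounce the bounce buffer to free after unmap
 * (NULL if the original buffer was mapped directly).
 */
static int my_map_or_bounce(struct my_ring *ring, void *buf, size_t len,
			    dma_addr_t *addr, void **bounce)
{
	*bounce = NULL;

	*addr = dma_map_single(ring->dma_dev, buf, len, DMA_TO_DEVICE);
	if (!dma_mapping_error(ring->dma_dev, *addr))
		return 0;

	*bounce = kmemdup(buf, len, GFP_ATOMIC | GFP_DMA);
	if (!*bounce)
		return -ENOMEM;

	*addr = dma_map_single(ring->dma_dev, *bounce, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dma_dev, *addr)) {
		kfree(*bounce);
		*bounce = NULL;
		return -EIO;
	}
	return 0;
}

A caller of this sketch would undo the mapping with dma_unmap_single(ring->dma_dev, *addr, len, DMA_TO_DEVICE) and then kfree() the bounce buffer, roughly mirroring what unmap_descbuffer() and the TX-status completion path do in dma.c.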