From 9bd568a50c446433038dec2a5186c5c57c3dbd23 Mon Sep 17 00:00:00 2001
From: Michael Buesch
Date: Wed, 18 Nov 2009 20:53:05 +0100
Subject: b43: Enforce DMA descriptor memory constraints

Enforce all device constraints on the descriptor memory region.

There are several constraints on the descriptor memory, as documented
in the specification. The current code does not enforce them and/or
incorrectly enforces them.

Those constraints are:
- The address limitations on 30/32bit engines, that also apply to the skbs.
- The 4k alignment requirement on 30/32bit engines.
- The 8k alignment requirement on 64bit engines.

Signed-off-by: Michael Buesch
Signed-off-by: John W. Linville
---
 drivers/net/wireless/b43/dma.c | 197 +++++++++++++++++++++++++++++++----------
 1 file changed, 152 insertions(+), 45 deletions(-)

(limited to 'drivers/net/wireless/b43/dma.c')

diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index b5cd7f57055..18b97c02b8a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,44 +383,160 @@ static inline
 	}
 }
 
+/* Check if a DMA region fits the device constraints.
+ * Returns true, if the region is OK for usage with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+				      dma_addr_t addr, size_t size)
+{
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + size > (1ULL << 30))
+			return 0;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + size > (1ULL << 32))
+			return 0;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+	return 1;
+}
+
+#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+				       dma_addr_t dmaaddr, size_t size)
+{
+	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+	free_pages((unsigned long)base, get_order(size));
+}
+
+static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
+					dma_addr_t *dmaaddr, size_t size,
+					gfp_t gfp_flags)
+{
+	void *base;
+
+	base = (void *)__get_free_pages(gfp_flags, get_order(size));
+	if (!base)
+		return NULL;
+	memset(base, 0, size);
+	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+				      DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+		free_pages((unsigned long)base, get_order(size));
+		return NULL;
+	}
+
+	return base;
+}
+
+static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				      dma_addr_t *dmaaddr, size_t size)
+{
+	void *base;
+
+	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+					 GFP_KERNEL);
+	if (!base) {
+		b43err(ring->dev->wl, "Failed to allocate or map pages "
+		       "for DMA ringmemory\n");
+		return NULL;
+	}
+	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+		/* The memory does not fit our device constraints.
+		 * Retry with GFP_DMA set to get lower memory. */
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+						 GFP_KERNEL | GFP_DMA);
+		if (!base) {
+			b43err(ring->dev->wl, "Failed to allocate or map pages "
+			       "in the GFP_DMA region for DMA ringmemory\n");
+			return NULL;
+		}
+		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+			b43err(ring->dev->wl, "Failed to allocate DMA "
+			       "ringmemory that fits device constraints\n");
+			return NULL;
+		}
+	}
+	/* We expect the memory to be 4k aligned, at least. */
+	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		return NULL;
+	}
+
+	return base;
+}
+
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
-	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
-	 * has shown that 4K is sufficient for the latter as long as the buffer
-	 * does not cross an 8K boundary.
-	 *
-	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
-	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
-	 * which accounts for the GFP_DMA flag below.
-	 *
-	 * The flags here must match the flags in free_ringmemory below!
+	unsigned int required;
+	void *base;
+	dma_addr_t dmaaddr;
+
+	/* There are several requirements to the descriptor ring memory:
+	 * - The memory region needs to fit the address constraints for the
+	 *   device (same as for frame buffers).
+	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
 	 */
+
 	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
-	if (!ring->descbase) {
-		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+	else
+		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+	if (B43_WARN_ON(required > 0x1000))
+		return -ENOMEM;
+
+	ring->alloc_descsize = 0x1000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+		/* We're on <=32bit DMA, or we already got 8k aligned memory.
+		 * That's all we need, so we're fine. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+	/* Ok, we failed at the 8k alignment requirement.
+	 * Try to force-align the memory region now. */
+	ring->alloc_descsize = 0x2000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
 		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+
+	if (is_8k_aligned(dmaaddr)) {
+		/* We're already 8k aligned. That's OK, too. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
 	}
-	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+	/* Force-align it to 8k */
+	ring->descbase = (void *)((u8 *)base + 0x1000);
+	ring->dmabase = dmaaddr + 0x1000;
+	B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
 	return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+				   ring->alloc_dmabase, ring->alloc_descsize);
 }
 
 /* Reset the RX DMA channel */
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
-	switch (ring->type) {
-	case B43_DMA_30BIT:
-		if ((u64)addr + buffersize > (1ULL << 30))
-			goto address_error;
-		break;
-	case B43_DMA_32BIT:
-		if ((u64)addr + buffersize > (1ULL << 32))
-			goto address_error;
-		break;
-	case B43_DMA_64BIT:
-		/* Currently we can't have addresses beyond
-		 * 64bit in the kernel. */
-		break;
+	if (!b43_dma_address_ok(ring, addr, buffersize)) {
+		/* We can't support this address. Unmap it again. */
+		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+		return 1;
 	}
 
 	/* The address is OK. */
 	return 0;
-
-address_error:
-	/* We can't support this address. Unmap it again. */
-	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-
-	return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	meta->dmaaddr = dmaaddr;
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -1246,6 +1350,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	}
 	/* Now transfer the whole frame. */
 	wmb();
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 	ops->poke_tx(ring, next_slot(ring, slot));
 
 	return 0;
--
cgit v1.2.3-70-g09d2
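
A standalone sketch of the alignment arithmetic used above (not part of the patch;
only the two mask macros are taken from the diff, with u64 replaced by uint64_t,
and the example address and program structure are assumptions for illustration).
It shows why alloc_ringmemory() can force 8k alignment by allocating a 0x2000-byte
region and using its second 4k half: an address that is 4k-aligned but not
8k-aligned becomes 8k-aligned after adding 0x1000.

#include <stdint.h>
#include <stdio.h>

/* Same masks as in the patch: low 12 bits clear -> 4k aligned,
 * low 13 bits clear -> 8k aligned. */
#define is_4k_aligned(addr) (((uint64_t)(addr) & 0x0FFFull) == 0)
#define is_8k_aligned(addr) (((uint64_t)(addr) & 0x1FFFull) == 0)

int main(void)
{
	/* Hypothetical bus address: 4k aligned, but not 8k aligned. */
	uint64_t dmaaddr = 0x12345000ull;

	printf("4k aligned: %d, 8k aligned: %d\n",
	       is_4k_aligned(dmaaddr), is_8k_aligned(dmaaddr));

	if (!is_8k_aligned(dmaaddr)) {
		/* A 4k-aligned address that is not 8k-aligned has its
		 * 0x1000 bit set, so adding 0x1000 clears the low 13 bits.
		 * This is the offset alloc_ringmemory() applies within the
		 * oversized 0x2000-byte region. */
		uint64_t forced = dmaaddr + 0x1000;

		printf("forced 0x%llx, 8k aligned: %d\n",
		       (unsigned long long)forced, is_8k_aligned(forced));
	}
	return 0;
}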