summaryrefslogtreecommitdiffstats
path: root/arch/microblaze/kernel
diff options
context:
space:
mode:
authorMichal Simek <monstr@monstr.eu>2010-03-11 14:15:48 +0100
committerMichal Simek <monstr@monstr.eu>2010-03-11 14:15:48 +0100
commit1be53e084a5bd8f59850348e1066d25aa0200031 (patch)
treebd8c92ae855b4624872e364c1279bf4252dddeba /arch/microblaze/kernel
parent3a0d7a4dd5b3a6545e5764735b48ab84e64af723 (diff)
microblaze: Fix dma alloc and free coherent dma functions
We have to use the consistent-memory code to be able to provide coherent DMA functions. The consistent code uses cache-inhibited page mappings. Xilinx reported that there is a bug in MicroBlaze for the WB and d-cache_always use option. MicroBlaze 7.30.a should be the first version where this bug is removed. Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--arch/microblaze/kernel/dma.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index fbe1e8184ef..b1084974fcc 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -43,9 +43,14 @@ static unsigned long get_dma_direct_offset(struct device *dev)
return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
}
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
+#ifdef NOT_COHERENT_CACHE
+ return consistent_alloc(flag, size, dma_handle);
+#else
void *ret;
struct page *page;
int node = dev_to_node(dev);
@@ -61,12 +66,17 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
return ret;
+#endif
}
-void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
+#ifdef NOT_COHERENT_CACHE
+ consistent_free(vaddr);
+#else
free_pages((unsigned long)vaddr, get_order(size));
+#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -105,7 +115,6 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- BUG_ON(direction == DMA_NONE);
__dma_sync_page(page_to_phys(page), offset, size, direction);
return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
@@ -121,7 +130,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
* phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
* dma_address is physical address
*/
- __dma_sync_page((void *)dma_address, 0 , size, direction);
+ __dma_sync_page(dma_address, 0 , size, direction);
}
struct dma_map_ops dma_direct_ops = {