author    | Paul Mundt <lethal@linux-sh.org> | 2009-10-26 09:50:51 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2009-10-26 09:50:51 +0900
commit    | f32154c9b580f11017b01bf093514c900c09364e (patch)
tree      | 3ec811bc69fd2e562bd9000c323fc3ae1584ce68
parent    | 73c926bee0e4b7739bbb992a0a3df561178dd522 (diff)
sh: Add dma-mapping support for dma_alloc/free_coherent() overrides.
This moves the current dma_alloc/free_coherent() calls to a generic
variant and plugs them in for the nommu default. Other variants can
override the defaults in the dma mapping ops directly.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
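
To illustrate the override mechanism the commit message describes, here is a minimal, hypothetical sketch (not part of this patch) of a platform variant supplying its own coherent-allocation hooks in its dma_map_ops. The myplat_* names are invented for illustration; dma_generic_alloc_coherent()/dma_generic_free_coherent() are the helpers this patch declares in asm/dma-mapping.h and defines in arch/sh/mm/consistent.c.

/*
 * Editorial sketch, not part of this patch: a hypothetical platform
 * variant that overrides the coherent allocation hooks in its own
 * dma_map_ops instead of relying on the nommu defaults.
 */
#include <linux/dma-mapping.h>

static void *myplat_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp)
{
	/* Platform-specific setup could go here; fall back to the
	 * generic page-allocator based implementation for the memory. */
	return dma_generic_alloc_coherent(dev, size, dma_handle, gfp);
}

static void myplat_free_coherent(struct device *dev, size_t size,
				 void *vaddr, dma_addr_t dma_handle)
{
	dma_generic_free_coherent(dev, size, vaddr, dma_handle);
}

struct dma_map_ops myplat_dma_ops = {
	.alloc_coherent	= myplat_alloc_coherent,
	.free_coherent	= myplat_free_coherent,
	/* .map_page, .map_sg, etc. as in nommu_dma_ops */
};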
-rw-r--r-- | arch/sh/include/asm/dma-mapping.h | 48
-rw-r--r-- | arch/sh/kernel/dma-nommu.c        |  2
-rw-r--r-- | arch/sh/mm/consistent.c           | 22
3 files changed, 47 insertions(+), 25 deletions(-)
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index b9a8f18f35a..653076018df 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
+#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
@@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
 
@@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-#include <asm-generic/dma-coherent.h>
-#include <asm-generic/dma-mapping-common.h>
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+	if (!ops->alloc_coherent)
+		return NULL;
+
+	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, dma_handle);
+}
+
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index e88fcebf860..b336fcf40f1 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -61,6 +61,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 
 struct dma_map_ops nommu_dma_ops = {
+	.alloc_coherent		= dma_generic_alloc_coherent,
+	.free_coherent		= dma_generic_free_coherent,
 	.map_page		= nommu_map_page,
 	.map_sg			= nommu_map_sg,
 	.sync_single_for_device	= nommu_sync_single,
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 1165161e472..ef20bbabefa 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -33,15 +33,12 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t gfp)
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret, *ret_nocache;
 	int order = get_order(size);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	ret = (void *)__get_free_pages(gfp, order);
 	if (!ret)
 		return NULL;
@@ -63,30 +60,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	*dma_handle = virt_to_phys(ret);
 
-	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
-
 	return ret_nocache;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+void dma_generic_free_coherent(struct device *dev, size_t size,
+			       void *vaddr, dma_addr_t dma_handle)
 {
 	int order = get_order(size);
 	unsigned long pfn = dma_handle >> PAGE_SHIFT;
 	int k;
 
-	WARN_ON(irqs_disabled());	/* for portability */
-
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-
-	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 	for (k = 0; k < (1 << order); k++)
 		__free_pages(pfn_to_page(pfn + k), 0);
+
 	iounmap(vaddr);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
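
For context, an editorial usage sketch (not part of the patch): a driver-side call site is source-compatible before and after this change, but now resolves through the static inline wrappers and get_dma_ops(dev)->alloc_coherent, which is dma_generic_alloc_coherent() on the nommu default. The example_setup_ring() helper below is hypothetical.

/*
 * Editorial usage sketch, not part of this patch.  The call below now
 * dispatches through get_dma_ops(dev)->alloc_coherent rather than the
 * removed arch-wide dma_alloc_coherent() export.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_setup_ring(struct device *dev)	/* hypothetical helper */
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access "ring" from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}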