author    | Ingo Molnar <mingo@elte.hu> | 2008-11-26 08:22:50 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-26 08:22:50 +0100
commit    | 7fbb8759eff9a348efa5f352ffaa51c364837c4b (patch)
tree      | d40cd3f47b9f667ba94d9613270132080dcb6a1a /arch/powerpc/kernel/dma.c
parent    | 6003ab0bad4cc56f3c4fadf62a0d23a967b9c53b (diff)
parent    | 13d428afc007fcfcd6deeb215618f54cf9c0cae6 (diff)
Merge commit 'v2.6.28-rc6' into core/debug
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r-- | arch/powerpc/kernel/dma.c | 131
1 files changed, 131 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
new file mode 100644
index 00000000000..1562daf8839
--- /dev/null
+++ b/arch/powerpc/kernel/dma.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <asm/bug.h>
+#include <asm/abs_addr.h>
+
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a per-device offset that can be applied if
+ * the address at which memory is visible to devices is not 0. Platform code
+ * can set archdata.dma_data to an unsigned long holding the offset. By
+ * default the offset is PCI_DRAM_OFFSET.
+ */
+
+static unsigned long get_dma_direct_offset(struct device *dev)
+{
+	if (dev)
+		return (unsigned long)dev->archdata.dma_data;
+
+	return PCI_DRAM_OFFSET;
+}
+
+void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	ret = __dma_alloc_coherent(size, dma_handle, flag);
+	if (ret == NULL)
+		return NULL;
+	*dma_handle += get_dma_direct_offset(dev);
+	return ret;
+#else
+	struct page *page;
+	int node = dev_to_node(dev);
+
+	/* ignore region specifiers */
+	flag &= ~(__GFP_HIGHMEM);
+
+	page = alloc_pages_node(node, flag, get_order(size));
+	if (page == NULL)
+		return NULL;
+	ret = page_address(page);
+	memset(ret, 0, size);
+	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+
+	return ret;
+#endif
+}
+
+void dma_direct_free_coherent(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	__dma_free_coherent(size, vaddr);
+#else
+	free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, enum dma_data_direction direction,
+			     struct dma_attrs *attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i) {
+		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_length = sg->length;
+	}
+
+	return nents;
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction,
+				struct dma_attrs *attrs)
+{
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PPC64
+	/* Could be improved to check for memory though it better be
+	 * done via some global so platforms can set the limit in case
+	 * they have limited DMA windows
+	 */
+	return mask >= DMA_32BIT_MASK;
+#else
+	return 1;
+#endif
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction dir,
+					     struct dma_attrs *attrs)
+{
+	BUG_ON(dir == DMA_NONE);
+	__dma_sync_page(page, offset, size, dir);
+	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+					 dma_addr_t dma_address,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
+}
+
+struct dma_mapping_ops dma_direct_ops = {
+	.alloc_coherent = dma_direct_alloc_coherent,
+	.free_coherent = dma_direct_free_coherent,
+	.map_sg = dma_direct_map_sg,
+	.unmap_sg = dma_direct_unmap_sg,
+	.dma_supported = dma_direct_dma_supported,
+	.map_page = dma_direct_map_page,
+	.unmap_page = dma_direct_unmap_page,
+};
+EXPORT_SYMBOL(dma_direct_ops);
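
For illustration only (not part of this patch): the header comment above describes a per-device offset stored in archdata.dma_data. A platform whose devices see system memory at a non-zero bus address could, roughly, publish that offset and point the device at dma_direct_ops during setup. The function name example_setup_dma and the offset value below are hypothetical, and the archdata.dma_ops field name is assumed from the kernel of this era; this is a sketch of the mechanism, not code from the patch.

	/* Hypothetical platform setup sketch -- names and value are invented */
	#define EXAMPLE_DMA_OFFSET	0x80000000ul	/* illustrative bus offset */

	static void example_setup_dma(struct device *dev)
	{
		/* dma_direct_* adds this offset to every bus address it hands out;
		 * the cast matches the one in get_dma_direct_offset() above */
		dev->archdata.dma_data = (void *)EXAMPLE_DMA_OFFSET;
		dev->archdata.dma_ops = &dma_direct_ops;	/* field name assumed */
	}

With no offset set and no dev (the NULL case in get_dma_direct_offset), the code falls back to PCI_DRAM_OFFSET, so existing direct-mapped platforms keep their previous behaviour.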