author		Jesper Nilsson <jesper.nilsson@axis.com>	2008-10-21 17:45:58 +0200
committer	Jesper Nilsson <jesper.nilsson@axis.com>	2008-10-29 17:29:44 +0100
commit		556dcee7b829e5c350c3ffdbdb87a8b15aa3c5d3
tree		26485b0d92eedcba6c0c96d4069469041aaf7106 /arch/cris/include/asm/dma-mapping.h
parent		242bfafc8e42da4697c1e2dea108049d14dbac4b
[CRIS] Move header files from include to arch/cris/include.
Change all users of the header files to the correct path.
Remove some unneeded headers for arch-v32.
Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>
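For illustration, the user-side fixups this message refers to are include-path updates of roughly the following shape; the header name below is hypothetical, chosen only to show the pattern, and the actual paths vary per file:

-#include <asm/arch/hwregs/dma.h>
+#include <arch/hwregs/dma.h>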
Diffstat (limited to 'arch/cris/include/asm/dma-mapping.h')
-rw-r--r--	arch/cris/include/asm/dma-mapping.h	170
1 file changed, 170 insertions(+), 0 deletions(-)
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..da8ef8e8f84
--- /dev/null
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -0,0 +1,170 @@
+/* DMA mapping. Nothing tricky here, just virt_to_phys */
+
+#ifndef _ASM_CRIS_DMA_MAPPING_H
+#define _ASM_CRIS_DMA_MAPPING_H
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#ifdef CONFIG_PCI
+#include <asm-generic/dma-coherent.h>
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+#else
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t flag)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t dma_handle)
+{
+	BUG();
+}
+#endif
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	printk("Map sg\n");
+	return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+	return (1 << INTERNODE_CACHE_SHIFT);
+}
+
+#define dma_is_consistent(d, h)	(1)
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+}


+#endif
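For context, a minimal sketch of how a driver would consume the streaming API defined above; the function and variable names here are assumptions for illustration, not part of this commit:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver fragment: map a kernel buffer for a
 * device-bound transfer.  On CRIS, dma_map_single() reduces to
 * virt_to_phys(), as the header comment notes, and
 * dma_mapping_error() always reports success.
 */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... hand 'handle' to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

The dma_mapping_error() check is a no-op on this architecture, since the implementation above always returns 0, but keeping it makes the fragment portable to architectures where a mapping can fail.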