author    Chris Zankel <czankel@tensilica.com>      2005-06-23 22:01:26 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org> 2005-06-24 00:05:22 -0700
commit    9a8fd5589902153a134111ed7a40f9cca1f83254 (patch)
tree      6f7a06de25bdf0b2d94623794c2cbbc66b5a77f6 /include/asm-xtensa/dma-mapping.h
parent    3f65ce4d141e435e54c20ed2379d983d362a2cb5 (diff)
[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 6

The attached patch provides part 6 of an architecture implementation for
the Tensilica Xtensa CPU series.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-xtensa/dma-mapping.h')
 -rw-r--r--  include/asm-xtensa/dma-mapping.h  182
 1 file changed, 182 insertions(+), 0 deletions(-)
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
new file mode 100644
index 00000000000..e86a206f120
--- /dev/null
+++ b/include/asm-xtensa/dma-mapping.h
@@ -0,0 +1,182 @@
+/*
+ * include/asm-xtensa/dma-mapping.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_MAPPING_H
+#define _XTENSA_DMA_MAPPING_H
+
+#include <asm/scatterlist.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/mm.h>
+
+/*
+ * DMA-consistent mapping functions. These low-level primitives are
+ * implemented by the platform code; consistent_sync() writes back
+ * and/or invalidates the cache as required by the DMA direction.
+ */
+
+extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
+extern void consistent_free(void*, size_t, dma_addr_t);
+extern void consistent_sync(void*, size_t, int);
+
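+/* No separate non-coherent allocator is provided; non-coherent
+ * allocations simply fall back to the coherent functions. */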
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
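+/*
+ * Streaming mappings: the DMA address of a buffer is simply its
+ * physical address, and consistent_sync() performs whatever cache
+ * maintenance the given direction requires.
+ */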
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ consistent_sync(ptr, size, direction);
+ return virt_to_phys(ptr);
+}
+
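+/* Unmapping is a no-op beyond the sanity check: the mapping holds no
+ * resources, and cache maintenance was already done at map time. */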
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++, sg++) {
+ BUG_ON(!sg->page);
+
+ sg->dma_address = page_to_phys(sg->page) + sg->offset;
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, direction);
+ }
+
+ return nents;
+}
+
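+/* Note: unlike dma_map_single(), no cache maintenance is done here;
+ * callers would need the explicit dma_sync_* helpers below. */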
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
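+/* Explicit cache maintenance for streaming mappings: *_for_cpu before
+ * the CPU reads a DMA buffer, *_for_device before the device touches
+ * it again. */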
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle) + offset, size, direction);
+}
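+
+/* Scatterlist variants: apply the same cache maintenance to every
+ * entry. */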
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, dir);
+}
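+
+/* Physical addresses are always valid bus addresses here, so a
+ * mapping can never fail. */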
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
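+/* Any DMA mask is accepted. */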
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ return L1_CACHE_BYTES;
+}
+
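+/* All DMA memory on this platform is treated as consistent. */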
+#define dma_is_consistent(d) (1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync(vaddr, size, direction);
+}
+
+#endif /* _XTENSA_DMA_MAPPING_H */
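
Usage sketch (editorial addition, not part of the commit): the fragment
below illustrates how a driver might exercise this API as merged, circa
Linux 2.6.13. The device pointer, buffer, and length are hypothetical
placeholders; note that dma_mapping_error() took only the handle at the
time.

#include <linux/dma-mapping.h>

static int example_receive(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* Map the buffer for device writes; on xtensa this syncs the
         * cache via consistent_sync() and returns the physical
         * address. */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(handle))
                return -EIO;

        /* ... program the device with 'handle' and wait for it ... */

        /* Make the device's writes visible to the CPU, then unmap. */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        return 0;
}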