path: root/arch/sparc/kernel/ioport.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 13:16:37 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 13:16:37 -0700
commit	a66a50054e46ec2a03244bc14c48b9125fcd75a7 (patch)
tree	ab549e4762a89d76846f104162d8f65b52c41a69 /arch/sparc/kernel/ioport.c
parent	7193bea53f9d9730bbc859777c2f86c76349914d (diff)
parent	695a461296e5df148c99ac087b9e1cb380f4db15 (diff)
Merge branch 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (59 commits)
  x86/gart: Do not select AGP for GART_IOMMU
  x86/amd-iommu: Initialize passthrough mode when requested
  x86/amd-iommu: Don't detach device from pt domain on driver unbind
  x86/amd-iommu: Make sure a device is assigned in passthrough mode
  x86/amd-iommu: Align locking between attach_device and detach_device
  x86/amd-iommu: Fix device table write order
  x86/amd-iommu: Add passthrough mode initialization functions
  x86/amd-iommu: Add core functions for pd allocation/freeing
  x86/dma: Mark iommu_pass_through as __read_mostly
  x86/amd-iommu: Change iommu_map_page to support multiple page sizes
  x86/amd-iommu: Support higher level PTEs in iommu_page_unmap
  x86/amd-iommu: Remove old page table handling macros
  x86/amd-iommu: Use 2-level page tables for dma_ops domains
  x86/amd-iommu: Remove bus_addr check in iommu_map_page
  x86/amd-iommu: Remove last usages of IOMMU_PTE_L0_INDEX
  x86/amd-iommu: Change alloc_pte to support 64 bit address space
  x86/amd-iommu: Introduce increase_address_space function
  x86/amd-iommu: Flush domains if address space size was increased
  x86/amd-iommu: Introduce set_dte_entry function
  x86/amd-iommu: Add a gneric version of amd_iommu_flush_all_devices
  ...
Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--	arch/sparc/kernel/ioport.c	190
1 file changed, 105 insertions(+), 85 deletions(-)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d97..edbea232c61 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -48,8 +48,6 @@
#include <asm/iommu.h>
#include <asm/io-unit.h>
-#include "dma.h"
-
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
* Typically devices use them for control blocks.
* CPU may access them without any explicit flushing.
*/
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
struct of_device *op = to_of_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
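For orientation, here is a minimal sketch of how a driver obtains such a coherent block through the generic DMA API once this conversion is in place; the of_device pointer "op" and the one-page size are illustrative assumptions, not taken from this patch:

    /* Sketch only: dma_alloc_coherent() dispatches through dma_ops and
     * reaches sbus_alloc_coherent() on this platform. "op" and the
     * one-page size are hypothetical. */
    dma_addr_t dma_handle;
    void *cpu_addr = dma_alloc_coherent(&op->dev, PAGE_SIZE,
                                        &dma_handle, GFP_KERNEL);
    if (cpu_addr == NULL)
            return -ENOMEM;
    /* CPU may access cpu_addr directly; the device uses dma_handle. */
    dma_free_coherent(&op->dev, PAGE_SIZE, cpu_addr, dma_handle);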
@@ -299,7 +298,8 @@ err_nopages:
return NULL;
}
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
struct page *pgv;
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
n = (n + PAGE_SIZE-1) & PAGE_MASK;
if ((res->end-res->start)+1 != n) {
- printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+ printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
return;
}
@@ -337,8 +337,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
* CPU view of this memory may be inconsistent with
* a device view and explicit flushing is necessary.
*/
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *va = page_address(page) + offset;
+
/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
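The rename from sbus_map_single() to sbus_map_page() follows the generic dma-mapping-common layer introduced by this series, which expresses single-buffer mappings as a (page, offset) pair. A hedged usage sketch, with "buf" and "len" as illustrative placeholders:

    /* Sketch only: buf/len are hypothetical. dma_map_single() is
     * rewritten by the common layer into a ->map_page() call. */
    dma_addr_t ba = dma_map_single(&op->dev, buf, len, DMA_TO_DEVICE);
    /* the device owns the buffer until it is unmapped or synced */
    dma_unmap_single(&op->dev, ba, len, DMA_TO_DEVICE);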
@@ -350,12 +355,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
return mmu_get_scsi_one(dev, va, len);
}
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_one(dev, ba, n);
}
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
@@ -366,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
return n;
}
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_sgl(dev, sg, n);
}
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
+ BUG();
}
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
+ BUG();
}
+struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+ .unmap_page = sbus_unmap_page,
+ .map_sg = sbus_map_sg,
+ .unmap_sg = sbus_unmap_sg,
+ .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
+ .sync_sg_for_device = sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
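The point of the dma_map_ops table above is that the architecture no longer exports bus-specific entry points; the generic header dispatches through the per-device (here, global) ops. Roughly, and paraphrased rather than quoted from asm-generic/dma-mapping-common.h:

    /* Paraphrase of the generic dispatch (not a verbatim copy):
     * dma_map_single() becomes an ops->map_page() call, which is why
     * sbus_map_single() above turned into sbus_map_page(). */
    static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
                                            size_t size,
                                            enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            return ops->map_page(dev, virt_to_page(ptr),
                                 offset_in_page(ptr), size, dir, NULL);
    }

On sparc32, get_dma_ops() resolves to the global dma_ops pointer exported above, so SBus and PCI can install different tables while drivers stay bus-agnostic.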
@@ -395,7 +421,8 @@ arch_initcall(sparc_register_ioport);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *pba, gfp_t gfp)
{
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
@@ -439,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
}
-EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
unsigned long pgp;
@@ -481,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
free_pages(pgp, get_order(n));
}
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
- int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- /* IIep is write-through, not flushing. */
- return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
- int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
- }
-}
-EXPORT_SYMBOL(pci_unmap_single);
/*
* Same as pci_map_single, but with pages.
*/
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
- dma_addr_t dma_address, size_t size, int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- /* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
/* Map a set of buffers described by scatterlist in streaming
mode for DMA. This is the scatter-gather version of the
@@ -551,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page);
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
return nents;
}
-EXPORT_SYMBOL(pci_map_sg);
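A hedged sketch of scatter-gather use through the generic API; "pdev", "sgl", and "nents" are illustrative placeholders:

    /* Sketch only: dma_map_sg() dispatches to pci32_map_sg() here. */
    int count = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
    if (count == 0)
            return -EIO;
    /* ... hand the mapped entries to the device ... */
    dma_unmap_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);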
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -588,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
}
}
-EXPORT_SYMBOL(pci_unmap_sg);
/* Make physical memory consistent for a single
* streaming mode DMA translation before or after a transfer.
@@ -600,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
* must first perform a pci_dma_sync_for_device, and then the
* device again owns the buffer.
*/
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
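A short sketch of the ownership handoff the comment above describes; "ba" and "size" are illustrative:

    /* Sketch only: after the device writes the buffer, hand it to the
     * CPU, read it, then give it back to the device. */
    dma_sync_single_for_cpu(&pdev->dev, ba, size, DMA_FROM_DEVICE);
    /* CPU reads are now guaranteed to see the device's writes */
    dma_sync_single_for_device(&pdev->dev, ba, size, DMA_FROM_DEVICE);
    /* the device owns the buffer again */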
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
@@ -626,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -641,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
}
}
}
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -658,9 +638,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
}
}
}
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+ .map_sg = pci32_map_sg,
+ .unmap_sg = pci32_unmap_sg,
+ .sync_single_for_cpu = pci32_sync_single_for_cpu,
+ .sync_single_for_device = pci32_sync_single_for_device,
+ .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
+ .sync_sg_for_device = pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
#endif /* CONFIG_PCI */
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly. For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return 1;
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
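Per the dma_supported() comment above, a device that can only drive 24 address bits would request its mask accordingly. A hedged sketch, where "mydev" is a hypothetical pci_dev:

    /* Sketch only: request a 24-bit DMA mask, as the dma_supported()
     * comment describes. "mydev" is hypothetical. */
    if (dma_set_mask(&mydev->dev, DMA_BIT_MASK(24)))
            dev_warn(&mydev->dev, "no suitable DMA available\n");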
+
+
#ifdef CONFIG_PROC_FS
static int